patch | y | oldf | idx | id | msg | proj | lang |
---|---|---|---|---|---|---|---|
@@ -1,7 +1,10 @@
class Video < ActiveRecord::Base
+ extend FriendlyId
+
belongs_to :watchable, polymorphic: true
validates :published_on, presence: true
+ validates :slug, presence: true, uniqueness: true
validates :title, presence: true
validates :watchable_id, presence: true
validates :watchable_type, presence: true | 1 | class Video < ActiveRecord::Base
belongs_to :watchable, polymorphic: true
validates :published_on, presence: true
validates :title, presence: true
validates :watchable_id, presence: true
validates :watchable_type, presence: true
validates :wistia_id, presence: true
delegate :included_in_plan?, to: :watchable
delegate :name, to: :watchable, prefix: true
def self.ordered
order('position asc')
end
def self.published
where('published_on <= ?', Time.zone.today)
end
def self.recently_published_first
order('published_on desc')
end
def clip
@video ||= Clip.new(wistia_id)
end
def preview
if preview_wistia_id.present?
Clip.new(preview_wistia_id)
else
VideoThumbnail.new(clip)
end
end
def has_notes?
notes.present?
end
def notes_html
BlueCloth.new(notes).to_html
end
end
| 1 | 10,763 | There's a good bit of class-level stuff that's repeated in our various product types. Think it's worth extracting a...dare I say it...module? | thoughtbot-upcase | rb |
@@ -79,7 +79,10 @@ func Build(pkgName, outpath string, config *compileopts.Config, action func(Buil
// keep functions interoperable, pass int64 types as pointers to
// stack-allocated values.
// Use -wasm-abi=generic to disable this behaviour.
- if config.Options.WasmAbi == "js" && strings.HasPrefix(config.Triple(), "wasm") {
+ if config.Options.WasmAbi != "" {
+ config.Target.WasmAbi = config.Options.WasmAbi
+ }
+ if config.Target.WasmAbi == "js" {
err := transform.ExternalInt64AsPtr(mod)
if err != nil {
return err | 1 | // Package builder is the compiler driver of TinyGo. It takes in a package name
// and an output path, and outputs an executable. It manages the entire
// compilation pipeline in between.
package builder
import (
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/tinygo-org/tinygo/compileopts"
"github.com/tinygo-org/tinygo/compiler"
"github.com/tinygo-org/tinygo/goenv"
"github.com/tinygo-org/tinygo/interp"
"github.com/tinygo-org/tinygo/stacksize"
"github.com/tinygo-org/tinygo/transform"
"tinygo.org/x/go-llvm"
)
// BuildResult is the output of a build. This includes the binary itself and
// some other metadata that is obtained while building the binary.
type BuildResult struct {
// A path to the output binary. It will be removed after Build returns, so
// if it should be kept it must be copied or moved away.
Binary string
// The directory of the main package. This is useful for testing as the test
// binary must be run in the directory of the tested package.
MainDir string
}
// Build performs a single package to executable Go build. It takes in a package
// name, an output path, and set of compile options and from that it manages the
// whole compilation process.
//
// The error value may be of type *MultiError. Callers will likely want to check
// for this case and print such errors individually.
func Build(pkgName, outpath string, config *compileopts.Config, action func(BuildResult) error) error {
// Compile Go code to IR.
machine, err := compiler.NewTargetMachine(config)
if err != nil {
return err
}
buildOutput, errs := compiler.Compile(pkgName, machine, config)
if errs != nil {
return newMultiError(errs)
}
mod := buildOutput.Mod
if config.Options.PrintIR {
fmt.Println("; Generated LLVM IR:")
fmt.Println(mod.String())
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after IR construction")
}
err = interp.Run(mod, config.DumpSSA())
if err != nil {
return err
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after interpreting runtime.initAll")
}
if config.GOOS() != "darwin" {
transform.ApplyFunctionSections(mod) // -ffunction-sections
}
// Browsers cannot handle external functions that have type i64 because it
// cannot be represented exactly in JavaScript (JS only has doubles). To
// keep functions interoperable, pass int64 types as pointers to
// stack-allocated values.
// Use -wasm-abi=generic to disable this behaviour.
if config.Options.WasmAbi == "js" && strings.HasPrefix(config.Triple(), "wasm") {
err := transform.ExternalInt64AsPtr(mod)
if err != nil {
return err
}
}
// Optimization levels here are roughly the same as Clang, but probably not
// exactly.
errs = nil
switch config.Options.Opt {
/*
Currently, turning optimizations off causes compile failures.
We rely on the optimizer removing some dead symbols.
Avoid providing an option that does not work right now.
In the future once everything has been fixed we can re-enable this.
case "none", "0":
errs = transform.Optimize(mod, config, 0, 0, 0) // -O0
*/
case "1":
errs = transform.Optimize(mod, config, 1, 0, 0) // -O1
case "2":
errs = transform.Optimize(mod, config, 2, 0, 225) // -O2
case "s":
errs = transform.Optimize(mod, config, 2, 1, 225) // -Os
case "z":
errs = transform.Optimize(mod, config, 2, 2, 5) // -Oz, default
default:
errs = []error{errors.New("unknown optimization level: -opt=" + config.Options.Opt)}
}
if len(errs) > 0 {
return newMultiError(errs)
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification failure after LLVM optimization passes")
}
// On the AVR, pointers can point either to flash or to RAM, but we don't
// know. As a temporary fix, load all global variables in RAM.
// In the future, there should be a compiler pass that determines which
// pointers are flash and which are in RAM so that pointers can have a
// correct address space parameter (address space 1 is for flash).
if strings.HasPrefix(config.Triple(), "avr") {
transform.NonConstGlobals(mod)
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after making all globals non-constant on AVR")
}
}
// Make sure stack sizes are loaded from a separate section so they can be
// modified after linking.
var stackSizeLoads []string
if config.AutomaticStackSize() {
stackSizeLoads = transform.CreateStackSizeLoads(mod, config)
}
// Generate output.
outext := filepath.Ext(outpath)
switch outext {
case ".o":
llvmBuf, err := machine.EmitToMemoryBuffer(mod, llvm.ObjectFile)
if err != nil {
return err
}
return ioutil.WriteFile(outpath, llvmBuf.Bytes(), 0666)
case ".bc":
data := llvm.WriteBitcodeToMemoryBuffer(mod).Bytes()
return ioutil.WriteFile(outpath, data, 0666)
case ".ll":
data := []byte(mod.String())
return ioutil.WriteFile(outpath, data, 0666)
default:
// Act as a compiler driver.
// Create a temporary directory for intermediary files.
dir, err := ioutil.TempDir("", "tinygo")
if err != nil {
return err
}
defer os.RemoveAll(dir)
// Write the object file.
objfile := filepath.Join(dir, "main.o")
llvmBuf, err := machine.EmitToMemoryBuffer(mod, llvm.ObjectFile)
if err != nil {
return err
}
err = ioutil.WriteFile(objfile, llvmBuf.Bytes(), 0666)
if err != nil {
return err
}
// Prepare link command.
executable := filepath.Join(dir, "main")
tmppath := executable // final file
ldflags := append(config.LDFlags(), "-o", executable, objfile)
// Load builtins library from the cache, possibly compiling it on the
// fly.
if config.Target.RTLib == "compiler-rt" {
librt, err := CompilerRT.Load(config.Triple())
if err != nil {
return err
}
ldflags = append(ldflags, librt)
}
// Add libc.
if config.Target.Libc == "picolibc" {
libc, err := Picolibc.Load(config.Triple())
if err != nil {
return err
}
ldflags = append(ldflags, libc)
}
// Compile extra files.
root := goenv.Get("TINYGOROOT")
for i, path := range config.ExtraFiles() {
abspath := filepath.Join(root, path)
outpath := filepath.Join(dir, "extra-"+strconv.Itoa(i)+"-"+filepath.Base(path)+".o")
err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", "-o", outpath, abspath)...)
if err != nil {
return &commandError{"failed to build", path, err}
}
ldflags = append(ldflags, outpath)
}
// Compile C files in packages.
for i, file := range buildOutput.ExtraFiles {
outpath := filepath.Join(dir, "pkg"+strconv.Itoa(i)+"-"+filepath.Base(file)+".o")
err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", "-o", outpath, file)...)
if err != nil {
return &commandError{"failed to build", file, err}
}
ldflags = append(ldflags, outpath)
}
if len(buildOutput.ExtraLDFlags) > 0 {
ldflags = append(ldflags, buildOutput.ExtraLDFlags...)
}
// Link the object files together.
err = link(config.Target.Linker, ldflags...)
if err != nil {
return &commandError{"failed to link", executable, err}
}
var calculatedStacks []string
var stackSizes map[string]functionStackSize
if config.Options.PrintStacks || config.AutomaticStackSize() {
// Try to determine stack sizes at compile time.
// Don't do this by default as it usually doesn't work on
// unsupported architectures.
calculatedStacks, stackSizes, err = determineStackSizes(mod, executable)
if err != nil {
return err
}
}
if config.AutomaticStackSize() {
// Modify the .tinygo_stacksizes section that contains a stack size
// for each goroutine.
err = modifyStackSizes(executable, stackSizeLoads, stackSizes)
if err != nil {
return fmt.Errorf("could not modify stack sizes: %w", err)
}
}
if config.Options.PrintSizes == "short" || config.Options.PrintSizes == "full" {
sizes, err := loadProgramSize(executable)
if err != nil {
return err
}
if config.Options.PrintSizes == "short" {
fmt.Printf(" code data bss | flash ram\n")
fmt.Printf("%7d %7d %7d | %7d %7d\n", sizes.Code, sizes.Data, sizes.BSS, sizes.Code+sizes.Data, sizes.Data+sizes.BSS)
} else {
fmt.Printf(" code rodata data bss | flash ram | package\n")
for _, name := range sizes.sortedPackageNames() {
pkgSize := sizes.Packages[name]
fmt.Printf("%7d %7d %7d %7d | %7d %7d | %s\n", pkgSize.Code, pkgSize.ROData, pkgSize.Data, pkgSize.BSS, pkgSize.Flash(), pkgSize.RAM(), name)
}
fmt.Printf("%7d %7d %7d %7d | %7d %7d | (sum)\n", sizes.Sum.Code, sizes.Sum.ROData, sizes.Sum.Data, sizes.Sum.BSS, sizes.Sum.Flash(), sizes.Sum.RAM())
fmt.Printf("%7d - %7d %7d | %7d %7d | (all)\n", sizes.Code, sizes.Data, sizes.BSS, sizes.Code+sizes.Data, sizes.Data+sizes.BSS)
}
}
// Print goroutine stack sizes, as far as possible.
if config.Options.PrintStacks {
printStacks(calculatedStacks, stackSizes)
}
// Get an Intel .hex file or .bin file from the .elf file.
outputBinaryFormat := config.BinaryFormat(outext)
switch outputBinaryFormat {
case "elf":
// do nothing, file is already in ELF format
case "hex", "bin":
// Extract raw binary, either encoding it as a hex file or as a raw
// firmware file.
tmppath = filepath.Join(dir, "main"+outext)
err := objcopy(executable, tmppath, outputBinaryFormat)
if err != nil {
return err
}
case "uf2":
// Get UF2 from the .elf file.
tmppath = filepath.Join(dir, "main"+outext)
err := convertELFFileToUF2File(executable, tmppath, config.Target.UF2FamilyID)
if err != nil {
return err
}
case "esp32", "esp8266":
// Special format for the ESP family of chips (parsed by the ROM
// bootloader).
tmppath = filepath.Join(dir, "main"+outext)
err := makeESPFirmareImage(executable, tmppath, outputBinaryFormat)
if err != nil {
return err
}
default:
return fmt.Errorf("unknown output binary format: %s", outputBinaryFormat)
}
return action(BuildResult{
Binary: tmppath,
MainDir: buildOutput.MainDir,
})
}
}
// functionStackSizes keeps stack size information about a single function
// (usually a goroutine).
type functionStackSize struct {
humanName string
stackSize uint64
stackSizeType stacksize.SizeType
missingStackSize *stacksize.CallNode
}
// determineStackSizes tries to determine the stack sizes of all started
// goroutines and of the reset vector. The LLVM module is necessary to find
// functions that call a function pointer.
func determineStackSizes(mod llvm.Module, executable string) ([]string, map[string]functionStackSize, error) {
var callsIndirectFunction []string
gowrappers := []string{}
gowrapperNames := make(map[string]string)
for fn := mod.FirstFunction(); !fn.IsNil(); fn = llvm.NextFunction(fn) {
// Determine which functions call a function pointer.
for bb := fn.FirstBasicBlock(); !bb.IsNil(); bb = llvm.NextBasicBlock(bb) {
for inst := bb.FirstInstruction(); !inst.IsNil(); inst = llvm.NextInstruction(inst) {
if inst.IsACallInst().IsNil() {
continue
}
if callee := inst.CalledValue(); callee.IsAFunction().IsNil() && callee.IsAInlineAsm().IsNil() {
callsIndirectFunction = append(callsIndirectFunction, fn.Name())
}
}
}
// Get a list of "go wrappers", small wrapper functions that decode
// parameters when starting a new goroutine.
attr := fn.GetStringAttributeAtIndex(-1, "tinygo-gowrapper")
if !attr.IsNil() {
gowrappers = append(gowrappers, fn.Name())
gowrapperNames[fn.Name()] = attr.GetStringValue()
}
}
sort.Strings(gowrappers)
// Load the ELF binary.
f, err := elf.Open(executable)
if err != nil {
return nil, nil, fmt.Errorf("could not load executable for stack size analysis: %w", err)
}
defer f.Close()
// Determine the frame size of each function (if available) and the callgraph.
functions, err := stacksize.CallGraph(f, callsIndirectFunction)
if err != nil {
return nil, nil, fmt.Errorf("could not parse executable for stack size analysis: %w", err)
}
// Goroutines need to be started and finished and take up some stack space
// that way. This can be measured by measuring the stack size of
// tinygo_startTask.
if numFuncs := len(functions["tinygo_startTask"]); numFuncs != 1 {
return nil, nil, fmt.Errorf("expected exactly one definition of tinygo_startTask, got %d", numFuncs)
}
baseStackSize, baseStackSizeType, baseStackSizeFailedAt := functions["tinygo_startTask"][0].StackSize()
sizes := make(map[string]functionStackSize)
// Add the reset handler function, for convenience. The reset handler runs
// startup code and the scheduler. The listed stack size is not the full
// stack size: interrupts are not counted.
var resetFunction string
switch f.Machine {
case elf.EM_ARM:
// Note: all interrupts happen on this stack so the real size is bigger.
resetFunction = "Reset_Handler"
}
if resetFunction != "" {
funcs := functions[resetFunction]
if len(funcs) != 1 {
return nil, nil, fmt.Errorf("expected exactly one definition of %s in the callgraph, found %d", resetFunction, len(funcs))
}
stackSize, stackSizeType, missingStackSize := funcs[0].StackSize()
sizes[resetFunction] = functionStackSize{
stackSize: stackSize,
stackSizeType: stackSizeType,
missingStackSize: missingStackSize,
humanName: resetFunction,
}
}
// Add all goroutine wrapper functions.
for _, name := range gowrappers {
funcs := functions[name]
if len(funcs) != 1 {
return nil, nil, fmt.Errorf("expected exactly one definition of %s in the callgraph, found %d", name, len(funcs))
}
humanName := gowrapperNames[name]
if humanName == "" {
humanName = name // fallback
}
stackSize, stackSizeType, missingStackSize := funcs[0].StackSize()
if baseStackSizeType != stacksize.Bounded {
// It was not possible to determine the stack size at compile time
// because tinygo_startTask does not have a fixed stack size. This
// can happen when using -opt=1.
stackSizeType = baseStackSizeType
missingStackSize = baseStackSizeFailedAt
} else if stackSize < baseStackSize {
// This goroutine has a very small stack, but still needs to fit all
// registers to start and suspend the goroutine. Otherwise a stack
// overflow will occur even before the goroutine is started.
stackSize = baseStackSize
}
sizes[name] = functionStackSize{
stackSize: stackSize,
stackSizeType: stackSizeType,
missingStackSize: missingStackSize,
humanName: humanName,
}
}
if resetFunction != "" {
return append([]string{resetFunction}, gowrappers...), sizes, nil
}
return gowrappers, sizes, nil
}
// modifyStackSizes modifies the .tinygo_stacksizes section with the updated
// stack size information. Before this modification, all stack sizes in the
// section assume the default stack size (which is relatively big).
func modifyStackSizes(executable string, stackSizeLoads []string, stackSizes map[string]functionStackSize) error {
fp, err := os.OpenFile(executable, os.O_RDWR, 0)
if err != nil {
return err
}
defer fp.Close()
elfFile, err := elf.NewFile(fp)
if err != nil {
return err
}
section := elfFile.Section(".tinygo_stacksizes")
if section == nil {
return errors.New("could not find .tinygo_stacksizes section")
}
if section.Size != section.FileSize {
// Sanity check.
return fmt.Errorf("expected .tinygo_stacksizes to have identical size and file size, got %d and %d", section.Size, section.FileSize)
}
// Read all goroutine stack sizes.
data := make([]byte, section.Size)
_, err = fp.ReadAt(data, int64(section.Offset))
if err != nil {
return err
}
if len(stackSizeLoads)*4 != len(data) {
// Note: while AVR should use 2 byte stack sizes, even 64-bit platforms
// should probably stick to 4 byte stack sizes as a larger than 4GB
// stack doesn't make much sense.
return errors.New("expected 4 byte stack sizes")
}
// Modify goroutine stack sizes with a compile-time known worst case stack
// size.
for i, name := range stackSizeLoads {
fn, ok := stackSizes[name]
if !ok {
return fmt.Errorf("could not find symbol %s in ELF file", name)
}
if fn.stackSizeType == stacksize.Bounded {
stackSize := uint32(fn.stackSize)
// Adding 4 for the stack canary. Even though the size may be
// automatically determined, stack overflow checking is still
// important as the stack size cannot be determined for all
// goroutines.
stackSize += 4
// Add stack size used by interrupts.
switch elfFile.Machine {
case elf.EM_ARM:
// On Cortex-M (assumed here), this stack size is 8 words or 32
// bytes. This is only to store the registers that the interrupt
// may modify, the interrupt will switch to the interrupt stack
// (MSP).
// Some background:
// https://interrupt.memfault.com/blog/cortex-m-rtos-context-switching
stackSize += 32
}
// Finally write the stack size to the binary.
binary.LittleEndian.PutUint32(data[i*4:], stackSize)
}
}
// Write back the modified stack sizes.
_, err = fp.WriteAt(data, int64(section.Offset))
if err != nil {
return err
}
return nil
}
// printStacks prints the maximum stack depth for functions that are started as
// goroutines. Stack sizes cannot always be determined statically, in particular
// recursive functions and functions that call interface methods or function
// pointers may have an unknown stack depth (depending on what the optimizer
// manages to optimize away).
//
// It might print something like the following:
//
// function stack usage (in bytes)
// Reset_Handler 316
// examples/blinky2.led1 92
// runtime.run$1 300
func printStacks(calculatedStacks []string, stackSizes map[string]functionStackSize) {
// Print the sizes of all stacks.
fmt.Printf("%-32s %s\n", "function", "stack usage (in bytes)")
for _, name := range calculatedStacks {
fn := stackSizes[name]
switch fn.stackSizeType {
case stacksize.Bounded:
fmt.Printf("%-32s %d\n", fn.humanName, fn.stackSize)
case stacksize.Unknown:
fmt.Printf("%-32s unknown, %s does not have stack frame information\n", fn.humanName, fn.missingStackSize)
case stacksize.Recursive:
fmt.Printf("%-32s recursive, %s may call itself\n", fn.humanName, fn.missingStackSize)
case stacksize.IndirectCall:
fmt.Printf("%-32s unknown, %s calls a function pointer\n", fn.humanName, fn.missingStackSize)
}
}
}
| 1 | 10,841 | Please do not modify the `config.Target` field; it should contain exactly what is extracted from the JSON files. Instead, you can either: * Add a getter to the `config` variable (`*compileopts.Config`), so you can simply call `config.WasmAbi()` to get the value. * Use a local variable instead. The getter would be slightly cleaner, as it matches the pattern of other configurations (`CGO_ENABLED`, `GC`, `NeedsStackObjects`, etc.). | tinygo-org-tinygo | go
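The getter approach suggested above might look roughly like the following; this is a minimal sketch only, assuming `compileopts.Config` keeps the `Options` and `Target` fields used in the diff (the method name and fallback logic are assumptions, not the project's actual implementation):

```go
// In package compileopts (hypothetical sketch).
// WasmAbi returns the wasm ABI selected on the command line, falling back to
// the value from the target JSON, without mutating config.Target.
func (c *Config) WasmAbi() string {
	if c.Options.WasmAbi != "" {
		return c.Options.WasmAbi
	}
	return c.Target.WasmAbi
}
```

The call site in `Build` could then check `config.WasmAbi() == "js"` directly, matching the pattern of the other getters mentioned in the comment.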
@@ -159,6 +159,14 @@ func newTestEnv(inboundOptions []InboundOption, outboundOptions []OutboundOption
testRouter := newTestRouter(procedures)
t := NewTransport()
+ if err := t.Start(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ err = multierr.Append(err, t.Stop())
+ }
+ }()
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil { | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package grpc
import (
"context"
"fmt"
"net"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/multierr"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/encoding/protobuf"
"go.uber.org/yarpc/internal/clientconfig"
"go.uber.org/yarpc/internal/examples/protobuf/example"
"go.uber.org/yarpc/internal/examples/protobuf/examplepb"
"go.uber.org/yarpc/internal/testtime"
"go.uber.org/yarpc/transport/x/grpc/grpcheader"
"go.uber.org/yarpc/yarpcerrors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
func TestYARPCBasic(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
_, err := e.GetValueYARPC(context.Background(), "foo")
assert.Equal(t, yarpcerrors.NotFoundErrorf("foo"), err)
assert.NoError(t, e.SetValueYARPC(context.Background(), "foo", "bar"))
value, err := e.GetValueYARPC(context.Background(), "foo")
assert.NoError(t, err)
assert.Equal(t, "bar", value)
})
}
func TestGRPCBasic(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
_, err := e.GetValueGRPC(context.Background(), "foo")
assert.Equal(t, status.Error(codes.NotFound, "foo"), err)
assert.NoError(t, e.SetValueGRPC(context.Background(), "foo", "bar"))
value, err := e.GetValueGRPC(context.Background(), "foo")
assert.NoError(t, err)
assert.Equal(t, "bar", value)
})
}
func TestYARPCMetadata(t *testing.T) {
t.Parallel()
var md metadata.MD
doWithTestEnv(t, []InboundOption{withInboundUnaryInterceptor(newMetadataUnaryServerInterceptor(&md))}, nil, func(t *testing.T, e *testEnv) {
assert.NoError(t, e.SetValueYARPC(context.Background(), "foo", "bar"))
assert.Len(t, md["user-agent"], 1)
assert.True(t, strings.Contains(md["user-agent"][0], UserAgent))
})
}
func TestYARPCWellKnownError(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(status.Error(codes.FailedPrecondition, "bar 1"))
_, err := e.GetValueYARPC(context.Background(), "foo")
assert.Equal(t, yarpcerrors.FailedPreconditionErrorf("bar 1"), err)
})
}
func TestYARPCNamedError(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(yarpcerrors.NamedErrorf("bar", "baz 1"))
_, err := e.GetValueYARPC(context.Background(), "foo")
assert.Equal(t, yarpcerrors.NamedErrorf("bar", "baz 1"), err)
})
}
func TestYARPCNamedErrorNoMessage(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(yarpcerrors.NamedErrorf("bar", ""))
_, err := e.GetValueYARPC(context.Background(), "foo")
assert.Equal(t, yarpcerrors.NamedErrorf("bar", ""), err)
})
}
func TestGRPCWellKnownError(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(status.Error(codes.FailedPrecondition, "bar 1"))
_, err := e.GetValueGRPC(context.Background(), "foo")
assert.Equal(t, status.Error(codes.FailedPrecondition, "bar 1"), err)
})
}
func TestGRPCNamedError(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(yarpcerrors.NamedErrorf("bar", "baz 1"))
_, err := e.GetValueGRPC(context.Background(), "foo")
assert.Equal(t, status.Error(codes.Unknown, "bar: baz 1"), err)
})
}
func TestGRPCNamedErrorNoMessage(t *testing.T) {
t.Parallel()
doWithTestEnv(t, nil, nil, func(t *testing.T, e *testEnv) {
e.KeyValueYARPCServer.SetNextError(yarpcerrors.NamedErrorf("bar", ""))
_, err := e.GetValueGRPC(context.Background(), "foo")
assert.Equal(t, status.Error(codes.Unknown, "bar"), err)
})
}
func doWithTestEnv(t *testing.T, inboundOptions []InboundOption, outboundOptions []OutboundOption, f func(*testing.T, *testEnv)) {
testEnv, err := newTestEnv(inboundOptions, outboundOptions)
require.NoError(t, err)
defer func() {
assert.NoError(t, testEnv.Close())
}()
f(t, testEnv)
}
type testEnv struct {
Inbound *Inbound
Outbound *Outbound
ClientConn *grpc.ClientConn
ContextWrapper *grpcheader.ContextWrapper
ClientConfig transport.ClientConfig
Procedures []transport.Procedure
KeyValueGRPCClient examplepb.KeyValueClient
KeyValueYARPCClient examplepb.KeyValueYARPCClient
KeyValueYARPCServer *example.KeyValueYARPCServer
}
func newTestEnv(inboundOptions []InboundOption, outboundOptions []OutboundOption) (_ *testEnv, err error) {
keyValueYARPCServer := example.NewKeyValueYARPCServer()
procedures := examplepb.BuildKeyValueYARPCProcedures(keyValueYARPCServer)
testRouter := newTestRouter(procedures)
t := NewTransport()
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
inbound := t.NewInbound(listener, inboundOptions...)
inbound.SetRouter(testRouter)
if err := inbound.Start(); err != nil {
return nil, err
}
defer func() {
if err != nil {
err = multierr.Append(err, inbound.Stop())
}
}()
clientConn, err := grpc.Dial(listener.Addr().String(), grpc.WithInsecure())
if err != nil {
return nil, err
}
defer func() {
if err != nil {
err = multierr.Append(err, clientConn.Close())
}
}()
keyValueClient := examplepb.NewKeyValueClient(clientConn)
outbound := t.NewSingleOutbound(listener.Addr().String(), outboundOptions...)
if err := outbound.Start(); err != nil {
return nil, err
}
defer func() {
if err != nil {
err = multierr.Append(err, outbound.Stop())
}
}()
clientConfig := clientconfig.MultiOutbound(
"example-client",
"example",
transport.Outbounds{
ServiceName: "example-client",
Unary: outbound,
},
)
keyValueYARPCClient := examplepb.NewKeyValueYARPCClient(clientConfig)
contextWrapper := grpcheader.NewContextWrapper().
WithCaller("example-client").
WithService("example").
WithEncoding(string(protobuf.Encoding))
return &testEnv{
inbound,
outbound,
clientConn,
contextWrapper,
clientConfig,
procedures,
keyValueClient,
keyValueYARPCClient,
keyValueYARPCServer,
}, nil
}
func (e *testEnv) GetValueYARPC(ctx context.Context, key string) (string, error) {
ctx, cancel := context.WithTimeout(ctx, testtime.Second)
defer cancel()
response, err := e.KeyValueYARPCClient.GetValue(ctx, &examplepb.GetValueRequest{key})
if err != nil {
return "", err
}
return response.Value, nil
}
func (e *testEnv) SetValueYARPC(ctx context.Context, key string, value string) error {
ctx, cancel := context.WithTimeout(ctx, testtime.Second)
defer cancel()
_, err := e.KeyValueYARPCClient.SetValue(ctx, &examplepb.SetValueRequest{key, value})
return err
}
func (e *testEnv) GetValueGRPC(ctx context.Context, key string) (string, error) {
ctx, cancel := context.WithTimeout(ctx, testtime.Second)
defer cancel()
response, err := e.KeyValueGRPCClient.GetValue(e.ContextWrapper.Wrap(ctx), &examplepb.GetValueRequest{key})
if err != nil {
return "", err
}
return response.Value, nil
}
func (e *testEnv) SetValueGRPC(ctx context.Context, key string, value string) error {
ctx, cancel := context.WithTimeout(ctx, testtime.Second)
defer cancel()
_, err := e.KeyValueGRPCClient.SetValue(e.ContextWrapper.Wrap(ctx), &examplepb.SetValueRequest{key, value})
return err
}
func (e *testEnv) Close() error {
return multierr.Combine(
e.ClientConn.Close(),
e.Outbound.Stop(),
e.Inbound.Stop(),
)
}
type testRouter struct {
procedures []transport.Procedure
}
func newTestRouter(procedures []transport.Procedure) *testRouter {
return &testRouter{procedures}
}
func (r *testRouter) Procedures() []transport.Procedure {
return r.procedures
}
func (r *testRouter) Choose(_ context.Context, request *transport.Request) (transport.HandlerSpec, error) {
for _, procedure := range r.procedures {
if procedure.Name == request.Procedure {
return procedure.HandlerSpec, nil
}
}
return transport.HandlerSpec{}, fmt.Errorf("no procedure for name %s", request.Procedure)
}
| 1 | 15,096 | The nil check is not necessary for these. Append checks both sides for nil. | yarpc-yarpc-go | go |
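For reference, `go.uber.org/multierr`'s `Append` treats a nil error on either side as absent, which is the behavior this comment relies on; a small standalone illustration:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	var err error
	err = multierr.Append(err, nil)                // nil + nil stays nil
	err = multierr.Append(err, errors.New("stop")) // nil left side: result is just "stop"
	err = multierr.Append(err, nil)                // nil right side: err is unchanged
	fmt.Println(err)                               // prints: stop
}
```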
@@ -62,6 +62,7 @@ func main() {
trace.WithAttributes(attrs...),
trace.ChildOf(spanCtx),
)
+ span.SetAttributes(entries...)
defer span.End()
span.AddEvent(ctx, "handling this...") | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"io"
"log"
"net/http"
"go.opentelemetry.io/otel/api/distributedcontext"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/exporter/trace/stdout"
"go.opentelemetry.io/otel/plugin/httptrace"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
func initTracer() {
// Create stdout exporter to be able to retrieve
// the collected spans.
exporter, err := stdout.NewExporter(stdout.Options{PrettyPrint: true})
if err != nil {
log.Fatal(err)
}
// For the demonstration, use sdktrace.AlwaysSample sampler to sample all traces.
// In a production application, use sdktrace.ProbabilitySampler with a desired probability.
tp, err := sdktrace.NewProvider(sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
sdktrace.WithSyncer(exporter))
if err != nil {
log.Fatal(err)
}
global.SetTraceProvider(tp)
}
func main() {
initTracer()
tr := global.TraceProvider().Tracer("example/server")
helloHandler := func(w http.ResponseWriter, req *http.Request) {
attrs, entries, spanCtx := httptrace.Extract(req.Context(), req)
req = req.WithContext(distributedcontext.WithMap(req.Context(), distributedcontext.NewMap(distributedcontext.MapUpdate{
MultiKV: entries,
})))
ctx, span := tr.Start(
req.Context(),
"hello",
trace.WithAttributes(attrs...),
trace.ChildOf(spanCtx),
)
defer span.End()
span.AddEvent(ctx, "handling this...")
_, _ = io.WriteString(w, "Hello, world!\n")
}
http.HandleFunc("/hello", helloHandler)
err := http.ListenAndServe(":7777", nil)
if err != nil {
panic(err)
}
}
| 1 | 10,893 | Shouldn't we instead have the SDK apply these, internally? I.e., I would expect to see the dctx entries included in the span as first-class distributed correlations, not as span attributes. | open-telemetry-opentelemetry-go | go |
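As a rough sketch of the alternative being raised (using only calls already present in this example), the handler would leave the extracted entries in the distributed context and not copy them onto the span as attributes:

```go
// Entries stay in the distributed context; in the reviewer's view the SDK,
// not the handler, would surface them as first-class correlations.
ctx := distributedcontext.WithMap(req.Context(), distributedcontext.NewMap(distributedcontext.MapUpdate{
	MultiKV: entries,
}))
ctx, span := tr.Start(ctx, "hello",
	trace.WithAttributes(attrs...),
	trace.ChildOf(spanCtx),
)
defer span.End()
// Note: no span.SetAttributes(entries...) here.
```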
@@ -60,5 +60,12 @@ func (t *topologyAPI) GetTopology(ctx context.Context, req *topologyv1.GetTopolo
}
func (t *topologyAPI) SearchTopology(ctx context.Context, req *topologyv1.SearchTopologyRequest) (*topologyv1.SearchTopologyResponse, error) {
- return nil, errors.New("not implemented")
+ resources, err := t.topology.SearchTopology(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ return &topologyv1.SearchTopologyResponse{
+ Resources: resources,
+ }, nil
} | 1 | package topology
import (
"context"
"errors"
"github.com/golang/protobuf/ptypes/any"
"github.com/uber-go/tally"
"go.uber.org/zap"
topologyv1 "github.com/lyft/clutch/backend/api/topology/v1"
"github.com/lyft/clutch/backend/module"
"github.com/lyft/clutch/backend/service"
topologyservice "github.com/lyft/clutch/backend/service/topology"
)
const (
Name = "clutch.module.topology"
)
func New(*any.Any, *zap.Logger, tally.Scope) (module.Module, error) {
client, ok := service.Registry["clutch.service.topology"]
if !ok {
return nil, errors.New("could not find topology service")
}
svc, ok := client.(topologyservice.Service)
if !ok {
return nil, errors.New("service was no the correct type")
}
mod := &mod{
topology: newTopologyAPI(svc),
}
return mod, nil
}
type mod struct {
topology topologyv1.TopologyAPIServer
}
func (m *mod) Register(r module.Registrar) error {
topologyv1.RegisterTopologyAPIServer(r.GRPCServer(), m.topology)
return r.RegisterJSONGateway(topologyv1.RegisterTopologyAPIHandler)
}
type topologyAPI struct {
topology topologyservice.Service
}
func newTopologyAPI(svc topologyservice.Service) topologyv1.TopologyAPIServer {
return &topologyAPI{
topology: svc,
}
}
func (t *topologyAPI) GetTopology(ctx context.Context, req *topologyv1.GetTopologyRequest) (*topologyv1.GetTopologyResponse, error) {
return nil, errors.New("not implemented")
}
func (t *topologyAPI) SearchTopology(ctx context.Context, req *topologyv1.SearchTopologyRequest) (*topologyv1.SearchTopologyResponse, error) {
return nil, errors.New("not implemented")
}
| 1 | 9,768 | nit: rename this in proto and update impl to `Search` to avoid stutter | lyft-clutch | go |
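Concretely, the de-stuttered handler might read as follows; this is only a sketch, and the renamed proto messages (`SearchRequest`, `SearchResponse`) are assumptions rather than the actual generated types:

```go
// With the RPC renamed to Search, callers see topology.Search(...) instead of
// the stuttering topology.SearchTopology(...).
func (t *topologyAPI) Search(ctx context.Context, req *topologyv1.SearchRequest) (*topologyv1.SearchResponse, error) {
	resources, err := t.topology.Search(ctx, req)
	if err != nil {
		return nil, err
	}
	return &topologyv1.SearchResponse{Resources: resources}, nil
}
```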
@@ -78,7 +78,8 @@ class SliderController extends AdminBaseController
->from(SliderItem::class, 's')
->where('s.domainId = :selectedDomainId')
->setParameter('selectedDomainId', $this->adminDomainTabsFacade->getSelectedDomainId())
- ->orderBy('s.position');
+ ->orderBy('s.position')
+ ->addOrderBy('s.id');
$dataSource = new QueryBuilderDataSource($queryBuilder, 's.id');
$grid = $this->gridFactory->create('sliderItemList', $dataSource); | 1 | <?php
namespace Shopsys\FrameworkBundle\Controller\Admin;
use Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade;
use Shopsys\FrameworkBundle\Component\Grid\GridFactory;
use Shopsys\FrameworkBundle\Component\Grid\QueryBuilderDataSource;
use Shopsys\FrameworkBundle\Component\Router\Security\Annotation\CsrfProtection;
use Shopsys\FrameworkBundle\Form\Admin\Slider\SliderItemFormType;
use Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider;
use Shopsys\FrameworkBundle\Model\Slider\Exception\SliderItemNotFoundException;
use Shopsys\FrameworkBundle\Model\Slider\SliderItem;
use Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface;
use Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\Routing\Annotation\Route;
class SliderController extends AdminBaseController
{
/**
* @var \Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider
*/
protected $breadcrumbOverrider;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade
*/
protected $adminDomainTabsFacade;
/**
* @var \Shopsys\FrameworkBundle\Component\Grid\GridFactory
*/
protected $gridFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade
*/
protected $sliderItemFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface
*/
protected $sliderItemDataFactory;
/**
* @param \Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade $sliderItemFacade
* @param \Shopsys\FrameworkBundle\Component\Grid\GridFactory $gridFactory
* @param \Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade $adminDomainTabsFacade
* @param \Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider $breadcrumbOverrider
* @param \Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface $sliderItemDataFactory
*/
public function __construct(
SliderItemFacade $sliderItemFacade,
GridFactory $gridFactory,
AdminDomainTabsFacade $adminDomainTabsFacade,
BreadcrumbOverrider $breadcrumbOverrider,
SliderItemDataFactoryInterface $sliderItemDataFactory
) {
$this->sliderItemFacade = $sliderItemFacade;
$this->gridFactory = $gridFactory;
$this->adminDomainTabsFacade = $adminDomainTabsFacade;
$this->breadcrumbOverrider = $breadcrumbOverrider;
$this->sliderItemDataFactory = $sliderItemDataFactory;
}
/**
* @Route("/slider/list/")
*/
public function listAction()
{
/** @var \Doctrine\Common\Persistence\ManagerRegistry $doctrine */
$doctrine = $this->getDoctrine();
/** @var \Doctrine\ORM\EntityManager $em */
$em = $doctrine->getManager();
$queryBuilder = $em->createQueryBuilder()
->select('s')
->from(SliderItem::class, 's')
->where('s.domainId = :selectedDomainId')
->setParameter('selectedDomainId', $this->adminDomainTabsFacade->getSelectedDomainId())
->orderBy('s.position');
$dataSource = new QueryBuilderDataSource($queryBuilder, 's.id');
$grid = $this->gridFactory->create('sliderItemList', $dataSource);
$grid->enableDragAndDrop(SliderItem::class);
$grid->addColumn('name', 's.name', t('Name'));
$grid->addColumn('link', 's.link', t('Link'));
$grid->addEditActionColumn('admin_slider_edit', ['id' => 's.id']);
$grid->addDeleteActionColumn('admin_slider_delete', ['id' => 's.id'])
->setConfirmMessage(t('Do you really want to remove this page?'));
$grid->setTheme('@ShopsysFramework/Admin/Content/Slider/listGrid.html.twig');
return $this->render('@ShopsysFramework/Admin/Content/Slider/list.html.twig', [
'gridView' => $grid->createView(),
]);
}
/**
* @Route("/slider/item/new/")
* @param \Symfony\Component\HttpFoundation\Request $request
*/
public function newAction(Request $request)
{
$sliderItemData = $this->sliderItemDataFactory->create();
$sliderItemData->domainId = $this->adminDomainTabsFacade->getSelectedDomainId();
$form = $this->createForm(SliderItemFormType::class, $sliderItemData, [
'scenario' => SliderItemFormType::SCENARIO_CREATE,
'slider_item' => null,
]);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
$sliderItem = $this->sliderItemFacade->create($sliderItemData);
$this->addSuccessFlashTwig(
t('Slider page <strong><a href="{{ url }}">{{ name }}</a></strong> created'),
[
'name' => $sliderItem->getName(),
'url' => $this->generateUrl('admin_slider_edit', ['id' => $sliderItem->getId()]),
]
);
return $this->redirectToRoute('admin_slider_list');
}
if ($form->isSubmitted() && !$form->isValid()) {
$this->addErrorFlashTwig(t('Please check the correctness of all data filled.'));
}
return $this->render('@ShopsysFramework/Admin/Content/Slider/new.html.twig', [
'form' => $form->createView(),
'selectedDomainId' => $this->adminDomainTabsFacade->getSelectedDomainId(),
]);
}
/**
* @Route("/slider/item/edit/{id}", requirements={"id"="\d+"})
* @param \Symfony\Component\HttpFoundation\Request $request
* @param int $id
*/
public function editAction(Request $request, $id)
{
$sliderItem = $this->sliderItemFacade->getById($id);
$sliderItemData = $this->sliderItemDataFactory->createFromSliderItem($sliderItem);
$form = $this->createForm(SliderItemFormType::class, $sliderItemData, [
'scenario' => SliderItemFormType::SCENARIO_EDIT,
'slider_item' => $sliderItem,
]);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
$this->sliderItemFacade->edit($id, $sliderItemData);
$this->addSuccessFlashTwig(
t('Slider page <strong><a href="{{ url }}">{{ name }}</a></strong> modified'),
[
'name' => $sliderItem->getName(),
'url' => $this->generateUrl('admin_slider_edit', ['id' => $sliderItem->getId()]),
]
);
return $this->redirectToRoute('admin_slider_list');
}
if ($form->isSubmitted() && !$form->isValid()) {
$this->addErrorFlash(t('Please check the correctness of all data filled.'));
}
$this->breadcrumbOverrider->overrideLastItem(
t('Editing slider page - %name%', ['%name%' => $sliderItem->getName()])
);
return $this->render('@ShopsysFramework/Admin/Content/Slider/edit.html.twig', [
'form' => $form->createView(),
'sliderItem' => $sliderItem,
]);
}
/**
* @Route("/slider/item/delete/{id}", requirements={"id" = "\d+"})
* @CsrfProtection
* @param int $id
*/
public function deleteAction($id)
{
try {
$name = $this->sliderItemFacade->getById($id)->getName();
$this->sliderItemFacade->delete($id);
$this->addSuccessFlashTwig(
t('Page <strong>{{ name }}</strong> deleted'),
[
'name' => $name,
]
);
} catch (SliderItemNotFoundException $ex) {
$this->addErrorFlash(t('Selected page doesn\'t exist.'));
}
return $this->redirectToRoute('admin_slider_list');
}
}
| 1 | 24,272 | Is it necessary to order by id as a second ordering? This will take place only when several new slider items are created without reordering (position is then null). Wouldn't it be better to recalculate the position after creating a new item? (Right now, items with a null position behave differently in administration and on the frontend.) | shopsys-shopsys | php
@@ -18,8 +18,10 @@ import (
)
var (
- claimFromRewardingFundBaseGas = uint64(10000)
- claimFromRewardingFundGasPerByte = uint64(100)
+ // ClaimFromRewardingFundBaseGas represents the base intrinsic gas for claimFromRewardingFund
+ ClaimFromRewardingFundBaseGas = uint64(10000)
+ // ClaimFromRewardingFundGasPerByte represents the claimFromRewardingFund payload gas per uint
+ ClaimFromRewardingFundGasPerByte = uint64(100)
)
// ClaimFromRewardingFund is the action to claim reward from the rewarding fund | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"math"
"math/big"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
)
var (
claimFromRewardingFundBaseGas = uint64(10000)
claimFromRewardingFundGasPerByte = uint64(100)
)
// ClaimFromRewardingFund is the action to claim reward from the rewarding fund
type ClaimFromRewardingFund struct {
AbstractAction
amount *big.Int
data []byte
}
// Amount returns the amount to claim
func (c *ClaimFromRewardingFund) Amount() *big.Int { return c.amount }
// Data returns the additional data
func (c *ClaimFromRewardingFund) Data() []byte { return c.data }
// ByteStream returns a raw byte stream of a claim action
func (c *ClaimFromRewardingFund) ByteStream() []byte {
return byteutil.Must(proto.Marshal(c.Proto()))
}
// Proto converts a claim action struct to a claim action protobuf
func (c *ClaimFromRewardingFund) Proto() *iotextypes.ClaimFromRewardingFund {
return &iotextypes.ClaimFromRewardingFund{
Amount: c.amount.String(),
Data: c.data,
}
}
// LoadProto converts a claim action protobuf to a claim action struct
func (c *ClaimFromRewardingFund) LoadProto(claim *iotextypes.ClaimFromRewardingFund) error {
*c = ClaimFromRewardingFund{}
amount, ok := big.NewInt(0).SetString(claim.Amount, 10)
if !ok {
return errors.New("failed to set claim amount")
}
c.amount = amount
c.data = claim.Data
return nil
}
// IntrinsicGas returns the intrinsic gas of a claim action
func (c *ClaimFromRewardingFund) IntrinsicGas() (uint64, error) {
dataLen := uint64(len(c.Data()))
if (math.MaxUint64-claimFromRewardingFundBaseGas)/claimFromRewardingFundGasPerByte < dataLen {
return 0, ErrOutOfGas
}
return claimFromRewardingFundBaseGas + claimFromRewardingFundGasPerByte*dataLen, nil
}
// Cost returns the total cost of a claim action
func (c *ClaimFromRewardingFund) Cost() (*big.Int, error) {
intrinsicGas, err := c.IntrinsicGas()
if err != nil {
return nil, errors.Wrap(err, "error when getting intrinsic gas for the claim action")
}
return big.NewInt(0).Mul(c.GasPrice(), big.NewInt(0).SetUint64(intrinsicGas)), nil
}
// ClaimFromRewardingFundBuilder is the struct to build ClaimFromRewardingFund
type ClaimFromRewardingFundBuilder struct {
Builder
claim ClaimFromRewardingFund
}
// SetAmount sets the amount to claim
func (b *ClaimFromRewardingFundBuilder) SetAmount(amount *big.Int) *ClaimFromRewardingFundBuilder {
b.claim.amount = amount
return b
}
// SetData sets the additional data
func (b *ClaimFromRewardingFundBuilder) SetData(data []byte) *ClaimFromRewardingFundBuilder {
b.claim.data = data
return b
}
// Build builds a new claim from rewarding fund action
func (b *ClaimFromRewardingFundBuilder) Build() ClaimFromRewardingFund {
b.claim.AbstractAction = b.Builder.Build()
return b.claim
}
| 1 | 17,289 | `ClaimFromRewardingFundBaseGas` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
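One common way to address a `gochecknoglobals` finding while keeping the identifiers exported is to declare them as constants; a sketch, assuming nothing needs to mutate these values at runtime:

```go
// Typed constants avoid the mutable package-level state the linter flags.
const (
	// ClaimFromRewardingFundBaseGas represents the base intrinsic gas for claimFromRewardingFund
	ClaimFromRewardingFundBaseGas = uint64(10000)
	// ClaimFromRewardingFundGasPerByte represents the claimFromRewardingFund payload gas per byte
	ClaimFromRewardingFundGasPerByte = uint64(100)
)
```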
@@ -89,9 +89,11 @@ namespace RDKit {
}
PyObject *GetSubstructMatch(const ROMol &mol, const ROMol &query,bool useChirality=false,
bool useQueryQueryMatches=false){
- NOGIL gil;
MatchVectType matches;
- SubstructMatch(mol,query,matches,true,useChirality,useQueryQueryMatches);
+ {
+ NOGIL gil;
+ SubstructMatch(mol,query,matches,true,useChirality,useQueryQueryMatches);
+ }
return convertMatches(matches);
}
| 1 | // $Id$
//
// Copyright (C) 2003-2009 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define NO_IMPORT_ARRAY
#include <boost/python.hpp>
#include <string>
#include "rdchem.h"
#include "seqs.hpp"
// ours
#include <RDBoost/pyint_api.h>
#include <RDBoost/Wrap.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/QueryOps.h>
#include <GraphMol/MolPickler.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <boost/python/iterator.hpp>
#include <boost/python/copy_non_const_reference.hpp>
namespace python = boost::python;
namespace RDKit {
python::object MolToBinary(const ROMol &self){
std::string res;
{
NOGIL gil;
MolPickler::pickleMol(self,res);
}
python::object retval = python::object(python::handle<>(PyBytes_FromStringAndSize(res.c_str(),res.length())));
return retval;
}
//
// allows molecules to be pickled.
// since molecules have a constructor that takes a binary string
// we only need to provide getinitargs()
//
struct mol_pickle_suite : python::pickle_suite
{
static python::tuple
getinitargs(const ROMol& self)
{
return python::make_tuple(MolToBinary(self));
};
};
bool HasSubstructMatchStr(std::string pkl, const ROMol &query,
bool recursionPossible=true,bool useChirality=false,
bool useQueryQueryMatches=false){
NOGIL gil;
ROMol *mol;
try {
mol = new ROMol(pkl);
} catch (...) {
mol = NULL;
}
if(!mol){
throw ValueErrorException("Null Molecule");
}
MatchVectType res;
bool hasM=SubstructMatch(*mol,query,res,recursionPossible,useChirality,useQueryQueryMatches);
delete mol;
return hasM;
}
bool HasSubstructMatch(const ROMol &mol, const ROMol &query,
bool recursionPossible=true,bool useChirality=false,
bool useQueryQueryMatches=false){
NOGIL gil;
MatchVectType res;
return SubstructMatch(mol,query,res,recursionPossible,useChirality,useQueryQueryMatches);
}
PyObject *convertMatches(MatchVectType &matches){
PyObject *res = PyTuple_New(matches.size());
MatchVectType::const_iterator i;
for(i=matches.begin();i!=matches.end();i++){
PyTuple_SetItem(res,i->first,PyInt_FromLong(i->second));
}
return res;
}
PyObject *GetSubstructMatch(const ROMol &mol, const ROMol &query,bool useChirality=false,
bool useQueryQueryMatches=false){
NOGIL gil;
MatchVectType matches;
SubstructMatch(mol,query,matches,true,useChirality,useQueryQueryMatches);
return convertMatches(matches);
}
PyObject *GetSubstructMatches(const ROMol &mol, const ROMol &query,bool uniquify=true,
bool useChirality=false,
bool useQueryQueryMatches=false,
unsigned int maxMatches = 1000){
std::vector< MatchVectType > matches;
int matched = SubstructMatch(mol,query,matches,uniquify,true,useChirality,useQueryQueryMatches,maxMatches);
PyObject *res = PyTuple_New(matched);
for(int idx=0;idx<matched;idx++){
PyTuple_SetItem(res,idx,convertMatches(matches[idx]));
}
return res;
}
unsigned int AddMolConformer(ROMol &mol, Conformer *conf, bool assignId=false) {
Conformer *nconf = new Conformer(*conf);
return mol.addConformer(nconf, assignId);
}
Conformer *GetMolConformer(ROMol &mol, int id=-1) {
return &(mol.getConformer(id));
}
PyObject* GetMolConformers(ROMol &mol) {
PyObject *res = PyTuple_New(mol.getNumConformers());
ROMol::ConformerIterator ci;
unsigned int i = 0;
for (ci = mol.beginConformers(); ci != mol.endConformers(); ci++) {
PyTuple_SetItem(res, i, python::converter::shared_ptr_to_python(*ci));
i++;
}
return res;
}
std::string MolGetProp(const ROMol &mol,const char *key){
if(!mol.hasProp(key)){
PyErr_SetString(PyExc_KeyError,key);
throw python::error_already_set();
}
std::string res;
mol.getProp(key,res);
return res;
}
int MolHasProp(const ROMol &mol,const char *key){
int res = mol.hasProp(key);
//std::cout << "key: " << key << ": " << res << std::endl;
return res;
}
void MolSetProp(const ROMol &mol,const char *key,std::string val,
bool computed=false){
mol.setProp(key, val, computed);
}
void MolClearProp(const ROMol &mol,const char *key) {
if(!mol.hasProp(key)){
return;
}
mol.clearProp(key);
}
void MolClearComputedProps(const ROMol &mol) {
mol.clearComputedProps();
}
void MolDebug(const ROMol &mol){
mol.debugMol(std::cout);
}
// FIX: we should eventually figure out how to do iterators properly
AtomIterSeq *MolGetAtoms(ROMol *mol){
AtomIterSeq *res = new AtomIterSeq(mol->beginAtoms(),mol->endAtoms());
return res;
}
QueryAtomIterSeq *MolGetAromaticAtoms(ROMol *mol){
QueryAtom *qa=new QueryAtom();
qa->setQuery(makeAtomAromaticQuery());
QueryAtomIterSeq *res = new QueryAtomIterSeq(mol->beginQueryAtoms(qa),
mol->endQueryAtoms());
return res;
}
QueryAtomIterSeq *MolGetQueryAtoms(ROMol *mol,QueryAtom *qa){
QueryAtomIterSeq *res = new QueryAtomIterSeq(mol->beginQueryAtoms(qa),
mol->endQueryAtoms());
return res;
}
//AtomIterSeq *MolGetHeteros(ROMol *mol){
// AtomIterSeq *res = new AtomIterSeq(mol->beginHeteros(),
// mol->endHeteros());
// return res;
//}
BondIterSeq *MolGetBonds(ROMol *mol){
BondIterSeq *res = new BondIterSeq(mol->beginBonds(),mol->endBonds());
return res;
}
int getMolNumAtoms(const ROMol &mol, int onlyHeavy, bool onlyExplicit){
if(onlyHeavy>-1){
BOOST_LOG(rdWarningLog)<<"WARNING: the onlyHeavy argument to mol.GetNumAtoms() has been deprecated. Please use the onlyExplicit argument instead or mol.GetNumHeavyAtoms() if you want the heavy atom count."<<std::endl;
return mol.getNumAtoms(onlyHeavy);
}
return mol.getNumAtoms(onlyExplicit);
}
class ReadWriteMol : public RWMol {
public:
ReadWriteMol(const ROMol &m,bool quickCopy=false,int confId=-1) : RWMol(m,quickCopy,confId){
};
void RemoveAtom(unsigned int idx){
removeAtom(idx);
};
void RemoveBond(unsigned int idx1,unsigned int idx2){
removeBond(idx1,idx2);
};
int AddBond(unsigned int begAtomIdx,
unsigned int endAtomIdx,
Bond::BondType order=Bond::UNSPECIFIED)
{
return addBond(begAtomIdx,endAtomIdx,order);
};
int AddAtom(Atom *atom){
PRECONDITION(atom,"bad atom");
return addAtom(atom,true,false);
};
void ReplaceAtom(unsigned int idx,Atom *atom){
replaceAtom(idx,atom);
};
ROMol *GetMol() const{
ROMol *res=new ROMol(*this);
return res;
}
};
std::string molClassDoc = "The Molecule class.\n\n\
In addition to the expected Atoms and Bonds, molecules contain:\n\
- a collection of Atom and Bond bookmarks indexed with integers\n\
that can be used to flag and retrieve particular Atoms or Bonds\n\
using the {get|set}{Atom|Bond}Bookmark() methods.\n\n\
- a set of string-valued properties. These can have arbitrary string\n\
labels and can be set and retrieved using the {set|get}Prop() methods\n\
Molecular properties can be tagged as being *computed*, in which case\n\
they will be automatically cleared under certain circumstances (when the\n\
molecule itself is modified, for example).\n\
Molecules also have the concept of *private* properties, which are tagged\n\
by beginning the property name with an underscore (_).\n";
std::string rwmolClassDoc = "The RW molecule class (read/write)\n\n\
This class is a more-performant version of the EditableMolecule class in that\n\
it is a 'live' molecule and shares the interface from the Mol class.\n\
All changes are performed without the need to create a copy of the\n\
molecule using GetMol() (this is still available, however).\n\
\n\
n.b. Eventually this class may become a direct replacement for EditableMol";
struct mol_wrapper {
static void wrap(){
python::register_exception_translator<ConformerException>(&rdExceptionTranslator);
python::class_<ROMol,ROMOL_SPTR,boost::noncopyable>("Mol",
molClassDoc.c_str(),
python::init<>("Constructor, takes no arguments"))
.def(python::init<const std::string &>())
.def(python::init<const ROMol &>())
.def(python::init<const ROMol &,bool>())
.def(python::init<const ROMol &,bool,int>())
.def("__copy__",&generic__copy__<ROMol>)
.def("__deepcopy__",&generic__deepcopy__<ROMol>)
.def("GetNumAtoms",getMolNumAtoms,
(python::arg("onlyHeavy")=-1,
python::arg("onlyExplicit")=true),
"Returns the number of atoms in the molecule.\n\n"
" ARGUMENTS:\n"
" - onlyExplicit: (optional) include only explicit atoms (atoms in the molecular graph)\n"
" defaults to 1.\n"
" NOTE: the onlyHeavy argument is deprecated\n"
)
.def("GetNumHeavyAtoms",&ROMol::getNumHeavyAtoms,
"Returns the number of heavy atoms (atomic number >1) in the molecule.\n\n"
)
.def("GetAtomWithIdx",(Atom * (ROMol::*)(unsigned int))&ROMol::getAtomWithIdx,
python::return_internal_reference<1,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a particular Atom.\n\n"
" ARGUMENTS:\n"
" - idx: which Atom to return\n\n"
" NOTE: atom indices start at 0\n")
.def("GetNumBonds",&ROMol::getNumBonds,
(python::arg("onlyHeavy")=true),
"Returns the number of Bonds in the molecule.\n\n"
" ARGUMENTS:\n"
" - onlyHeavy: (optional) include only bonds to heavy atoms (not Hs)\n"
" defaults to 1.\n")
.def("GetBondWithIdx",(Bond * (ROMol::*)(unsigned int))&ROMol::getBondWithIdx,
python::return_internal_reference<1,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a particular Bond.\n\n"
" ARGUMENTS:\n"
" - idx: which Bond to return\n\n"
" NOTE: bond indices start at 0\n")
.def("GetNumConformers", &ROMol::getNumConformers,
"Return the number of conformations on the molecule")
.def("AddConformer", AddMolConformer,
(python::arg("self"),python::arg("conf"),
python::arg("assignId")=false),
"Add a conformer to the molecule and return the conformer ID")
.def("GetConformer", GetMolConformer,
(python::arg("self"),python::arg("id")=-1),
"Get the conformer with a specified ID",
python::return_internal_reference<1,
python::with_custodian_and_ward_postcall<0,1> >())
.def("GetConformers", GetMolConformers,
"Get all the conformers as a tuple")
.def("RemoveAllConformers", &ROMol::clearConformers,
"Remove all the conformations on the molecule")
.def("RemoveConformer", &ROMol::removeConformer,
"Remove the conformer with the specified ID")
.def("GetBondBetweenAtoms",
(Bond *(ROMol::*)(unsigned int,unsigned int))&ROMol::getBondBetweenAtoms,
python::return_internal_reference<1,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns the bond between two atoms, if there is one.\n\n"
" ARGUMENTS:\n"
" - idx1,idx2: the Atom indices\n\n"
" Returns:\n"
" The Bond between the two atoms, if such a bond exists.\n"
" If there is no Bond between the atoms, None is returned instead.\n\n"
" NOTE: bond indices start at 0\n"
)
// substructures
.def("HasSubstructMatch",HasSubstructMatch,
(python::arg("self"),python::arg("query"),
python::arg("recursionPossible")=true,
python::arg("useChirality")=false,
python::arg("useQueryQueryMatches")=false),
"Queries whether or not the molecule contains a particular substructure.\n\n"
" ARGUMENTS:\n"
" - query: a Molecule\n\n"
" - recursionPossible: (optional)\n\n"
" - useChirality: enables the use of stereochemistry in the matching\n\n"
" - useQueryQueryMatches: use query-query matching logic\n\n"
" RETURNS: True or False\n")
.def("GetSubstructMatch",GetSubstructMatch,
(python::arg("self"),python::arg("query"),
python::arg("useChirality")=false,
python::arg("useQueryQueryMatches")=false),
"Returns the indices of the molecule's atoms that match a substructure query.\n\n"
" ARGUMENTS:\n"
" - query: a Molecule\n\n"
" - useChirality: enables the use of stereochemistry in the matching\n\n"
" - useQueryQueryMatches: use query-query matching logic\n\n"
" RETURNS: a tuple of integers\n\n"
" NOTES:\n"
" - only a single match is returned\n"
" - the ordering of the indices corresponds to the atom ordering\n"
" in the query. For example, the first index is for the atom in\n"
" this molecule that matches the first atom in the query.\n"
)
.def("GetSubstructMatches",
GetSubstructMatches,
(python::arg("self"),python::arg("query"),
python::arg("uniquify")=true,
python::arg("useChirality")=false,
python::arg("useQueryQueryMatches")=false,
python::arg("maxMatches")=1000),
"Returns tuples of the indices of the molecule's atoms that match a substructure query.\n\n"
" ARGUMENTS:\n"
" - query: a Molecule.\n"
" - uniquify: (optional) determines whether or not the matches are uniquified.\n"
" Defaults to 1.\n\n"
" - useChirality: enables the use of stereochemistry in the matching\n\n"
" - useQueryQueryMatches: use query-query matching logic\n\n"
" RETURNS: a tuple of tuples of integers\n\n"
" NOTE:\n"
" - the ordering of the indices corresponds to the atom ordering\n"
" in the query. For example, the first index is for the atom in\n"
" this molecule that matches the first atom in the query.\n")
// properties
.def("SetProp",MolSetProp,
(python::arg("self"), python::arg("key"),
python::arg("val"), python::arg("computed")=false),
"Sets a molecular property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a string).\n"
" - value: the property value (a string).\n"
" - computed: (optional) marks the property as being computed.\n"
" Defaults to 0.\n\n")
.def("HasProp",MolHasProp,
"Queries a molecule to see if a particular property has been assigned.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to check for (a string).\n")
.def("GetProp",MolGetProp,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a string).\n\n"
" RETURNS: a string\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception will be raised.\n")
.def("ClearProp", MolClearProp,
"Removes a property from the molecule.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to clear (a string).\n")
.def("ClearComputedProps", MolClearComputedProps,
"Removes all computed properties from the molecule.\n\n")
.def("UpdatePropertyCache", &ROMol::updatePropertyCache,
(python::arg("self"),python::arg("strict")=true),
"Regenerates computed properties like implicit valence and ring information.\n\n")
.def("NeedsUpdatePropertyCache", &ROMol::needsUpdatePropertyCache,
(python::arg("self")),
"Returns true or false depending on whether implicit and explicit valence of the molecule have already been calculated.\n\n")
.def("GetPropNames",&ROMol::getPropList,
(python::arg("self"),python::arg("includePrivate")=false,
python::arg("includeComputed")=false),
"Returns a tuple with all property names for this molecule.\n\n"
" ARGUMENTS:\n"
" - includePrivate: (optional) toggles inclusion of private properties in the result set.\n"
" Defaults to 0.\n"
" - includeComputed: (optional) toggles inclusion of computed properties in the result set.\n"
" Defaults to 0.\n\n"
" RETURNS: a tuple of strings\n")
.def("GetAtoms",MolGetAtoms,
python::return_value_policy<python::manage_new_object,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a read-only sequence containing all of the molecule's Atoms.\n")
.def("GetAromaticAtoms",MolGetAromaticAtoms,
python::return_value_policy<python::manage_new_object,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a read-only sequence containing all of the molecule's aromatic Atoms.\n")
.def("GetAtomsMatchingQuery",MolGetQueryAtoms,
python::return_value_policy<python::manage_new_object,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a read-only sequence containing all of the atoms in a molecule that match the query atom.\n")
.def("GetBonds",MolGetBonds,
python::return_value_policy<python::manage_new_object,
python::with_custodian_and_ward_postcall<0,1> >(),
"Returns a read-only sequence containing all of the molecule's Bonds.\n")
// enable pickle support
.def_pickle(mol_pickle_suite())
.def("Debug",MolDebug,
"Prints debugging information about the molecule.\n")
.def("ToBinary",MolToBinary,
"Returns a binary string representation of the molecule.\n")
.def("GetRingInfo",&ROMol::getRingInfo,
python::return_value_policy<python::reference_existing_object>(),
"Returns the number of molecule's RingInfo object.\n\n")
;
// ---------------------------------------------------------------------------------------------
python::def("_HasSubstructMatchStr",
HasSubstructMatchStr,
(python::arg("pkl"),python::arg("query"),
python::arg("recursionPossible")=true,
python::arg("useChirality")=false,
python::arg("useQueryQueryMatches")=false
),
"This function is included to speed substructure queries from databases, \n"
"it's probably not of\n"
"general interest.\n\n"
" ARGUMENTS:\n"
" - pkl: a Molecule pickle\n\n"
" - query: a Molecule\n\n"
" - recursionPossible: (optional)\n\n"
" - useChirality: (optional)\n\n"
" - useQueryQueryMatches: use query-query matching logic\n\n"
" RETURNS: True or False\n");
python::class_<ReadWriteMol, python::bases<ROMol> >("RWMol",
rwmolClassDoc.c_str(),
python::init<const ROMol &>("Construct from a Mol"))
.def(python::init<const ROMol &,bool>())
.def(python::init<const ROMol &,bool,int>())
.def("__copy__",&generic__copy__<ReadWriteMol>)
.def("__deepcopy__",&generic__deepcopy__<ReadWriteMol>)
.def("RemoveAtom",&ReadWriteMol::RemoveAtom,
"Remove the specified atom from the molecule")
.def("RemoveBond",&ReadWriteMol::RemoveBond,
"Remove the specified bond from the molecule")
.def("AddBond",&ReadWriteMol::AddBond,
(python::arg("mol"),python::arg("beginAtomIdx"),python::arg("endAtomIdx"),
python::arg("order")=Bond::UNSPECIFIED),
"add a bond, returns the new number of bonds")
.def("AddAtom",&ReadWriteMol::AddAtom,
(python::arg("mol"),python::arg("atom")),
"add an atom, returns the index of the newly added atom")
.def("ReplaceAtom",&ReadWriteMol::ReplaceAtom,
(python::arg("mol"),python::arg("index"),python::arg("newAtom")),
"replaces the specified atom with the provided one")
.def("GetMol",&ReadWriteMol::GetMol,
"Returns a Mol (a normal molecule)",
python::return_value_policy<python::manage_new_object>())
;
};
};
}// end of namespace
void wrap_mol() {
RDKit::mol_wrapper::wrap();
}
| 1 | 14,213 | How about modifying the NOGIL definition so that it only does anything if the thread safety flag (RDK_BUILD_THREADSAFE_SSS) is set? | rdkit-rdkit | cpp |
@@ -29,6 +29,11 @@ type chainState interface {
LatestState(ctx context.Context) (state.Tree, error)
}
+// BlockTimer defines a interface to a struct that can give the current block height.
+type BlockTimer interface {
+ BlockHeight() (uint64, error)
+}
+
// PublishFunc is a function the Sender calls to publish a message to the network.
type PublishFunc func(topic string, data []byte) error
| 1 | package msg
import (
"context"
"sync"
"github.com/ipfs/go-cid"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
)
var msgSendErrCt = metrics.NewInt64Counter("message_sender_error", "Number of errors encountered while sending a message")
// Topic is the network pubsub topic identifier on which new messages are announced.
const Topic = "/fil/msgs"
// Abstracts over a store of blockchain state.
type chainState interface {
// LatestState returns the latest chain state.
LatestState(ctx context.Context) (state.Tree, error)
}
// PublishFunc is a function the Sender calls to publish a message to the network.
type PublishFunc func(topic string, data []byte) error
// Sender is plumbing implementation that knows how to send a message.
type Sender struct {
// Signs messages.
signer types.Signer
// Provides actor state
chainState chainState
// Provides the current block height
blockTimer core.BlockTimer
// Tracks inbound messages for mining
inbox *core.MessagePool
// Tracks outbound messages
outbox *core.MessageQueue
// Validates messages before sending them.
validator consensus.SignedMessageValidator
// Invoked to publish the new message to the network.
publish PublishFunc
// Protects the "next nonce" calculation to avoid collisions.
l sync.Mutex
}
// NewSender returns a new Sender. There should be exactly one of these per node because
// sending locks to reduce nonce collisions.
func NewSender(signer types.Signer, chainReader chain.ReadStore, blockTimer core.BlockTimer,
msgQueue *core.MessageQueue, msgPool *core.MessagePool,
validator consensus.SignedMessageValidator, publish PublishFunc) *Sender {
return &Sender{
signer: signer,
chainState: chainReader,
blockTimer: blockTimer,
inbox: msgPool,
outbox: msgQueue,
validator: validator,
publish: publish,
}
}
// Send sends a message. See api description.
func (s *Sender) Send(ctx context.Context, from, to address.Address, value *types.AttoFIL, gasPrice types.AttoFIL, gasLimit types.GasUnits, method string, params ...interface{}) (out cid.Cid, err error) {
defer func() {
if err != nil {
msgSendErrCt.Inc(ctx, 1)
}
}()
encodedParams, err := abi.ToEncodedValues(params...)
if err != nil {
return cid.Undef, errors.Wrap(err, "invalid params")
}
// Lock to avoid race for message nonce.
s.l.Lock()
defer s.l.Unlock()
st, err := s.chainState.LatestState(ctx)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to load state from chain")
}
fromActor, err := st.GetActor(ctx, from)
if err != nil {
return cid.Undef, errors.Wrapf(err, "no actor at address %s", from)
}
nonce, err := nextNonce(fromActor, s.outbox, from)
if err != nil {
return cid.Undef, errors.Wrapf(err, "failed calculating nonce for actor %s", from)
}
msg := types.NewMessage(from, to, nonce, value, method, encodedParams)
smsg, err := types.NewSignedMessage(*msg, s.signer, gasPrice, gasLimit)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to sign message")
}
err = s.validator.Validate(ctx, smsg, fromActor)
if err != nil {
return cid.Undef, errors.Wrap(err, "invalid message")
}
smsgdata, err := smsg.Marshal()
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to marshal message")
}
height, err := s.blockTimer.BlockHeight()
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to get block height")
}
// Add to the local message queue/pool at the last possible moment before broadcasting to network.
if err := s.outbox.Enqueue(smsg, height); err != nil {
return cid.Undef, errors.Wrap(err, "failed to add message to outbound queue")
}
if _, err := s.inbox.Add(smsg); err != nil {
return cid.Undef, errors.Wrap(err, "failed to add message to message pool")
}
if err = s.publish(Topic, smsgdata); err != nil {
return cid.Undef, errors.Wrap(err, "failed to publish message to network")
}
log.Debugf("MessageSend with message: %s", smsg)
return smsg.Cid()
}
// nextNonce returns the next expected nonce value for an account actor. This is the larger
// of the actor's nonce value, or one greater than the largest nonce from the actor found in the message pool.
func nextNonce(act *actor.Actor, outbox *core.MessageQueue, address address.Address) (uint64, error) {
actorNonce, err := actor.NextNonce(act)
if err != nil {
return 0, err
}
poolNonce, found := outbox.LargestNonce(address)
if found && poolNonce >= actorNonce {
return poolNonce + 1, nil
}
return actorNonce, nil
}
| 1 | 18,451 | blockheighter would be more clear | filecoin-project-venus | go |
@@ -949,5 +949,15 @@ void OptimizerUtils::copyIndexScanData(const nebula::graph::IndexScan* from,
to->setFilter(from->filter() == nullptr ? nullptr : from->filter()->clone());
}
+Status OptimizerUtils::compareAndSwapBound(std::pair<Value, bool>& a, std::pair<Value, bool>& b) {
+ if (a.first > b.first) {
+ std::swap(a, b);
+ } else if (a.first < b.first) { // do nothing
+ } else if (a.second > b.second) {
+ std::swap(a, b);
+ }
+ return Status::OK();
+}
+
} // namespace graph
} // namespace nebula | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "graph/optimizer/OptimizerUtils.h"
#include <algorithm>
#include <iterator>
#include <memory>
#include <unordered_set>
#include "common/base/Status.h"
#include "common/datatypes/Value.h"
#include "common/expression/ConstantExpression.h"
#include "common/expression/Expression.h"
#include "common/expression/LogicalExpression.h"
#include "common/expression/PropertyExpression.h"
#include "common/expression/RelationalExpression.h"
#include "graph/planner/plan/Query.h"
#include "interface/gen-cpp2/meta_types.h"
#include "interface/gen-cpp2/storage_types.h"
using nebula::meta::cpp2::ColumnDef;
using nebula::meta::cpp2::IndexItem;
using nebula::storage::cpp2::IndexColumnHint;
using nebula::storage::cpp2::IndexQueryContext;
using BVO = nebula::graph::OptimizerUtils::BoundValueOperator;
using ExprKind = nebula::Expression::Kind;
namespace nebula {
namespace graph {
Value OptimizerUtils::boundValue(const meta::cpp2::ColumnDef& col,
BoundValueOperator op,
const Value& v) {
switch (op) {
case BoundValueOperator::GREATER_THAN: {
return boundValueWithGT(col, v);
}
case BoundValueOperator::LESS_THAN: {
return boundValueWithLT(col, v);
}
case BoundValueOperator::MAX: {
return boundValueWithMax(col);
}
case BoundValueOperator::MIN: {
return boundValueWithMin(col);
}
}
return Value::kNullBadType;
}
Value OptimizerUtils::boundValueWithGT(const meta::cpp2::ColumnDef& col, const Value& v) {
auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type());
switch (type) {
case Value::Type::INT: {
if (v.getInt() == std::numeric_limits<int64_t>::max()) {
return v;
} else {
return v + 1;
}
}
case Value::Type::FLOAT: {
if (v.getFloat() > 0.0) {
if (v.getFloat() == std::numeric_limits<double_t>::max()) {
return v;
}
} else if (v.getFloat() == 0.0) {
return Value(std::numeric_limits<double_t>::min());
} else {
if (v.getFloat() == -std::numeric_limits<double_t>::min()) {
return Value(0.0);
}
}
return v.getFloat() + kEpsilon;
}
case Value::Type::STRING: {
if (!col.type.type_length_ref().has_value()) {
return Value::kNullBadType;
}
std::vector<unsigned char> bytes(v.getStr().begin(), v.getStr().end());
bytes.resize(*col.get_type().type_length_ref());
for (size_t i = bytes.size();; i--) {
if (i > 0) {
if (bytes[i - 1]++ != 255) break;
} else {
return Value(std::string(*col.get_type().type_length_ref(), '\377'));
}
}
return Value(std::string(bytes.begin(), bytes.end()));
}
case Value::Type::DATE: {
if (Date(std::numeric_limits<int16_t>::max(), 12, 31) == v.getDate()) {
return v.getDate();
} else if (Date() == v.getDate()) {
return Date(0, 1, 2);
}
auto d = v.getDate();
if (d.day < 31) {
d.day += 1;
} else {
d.day = 1;
if (d.month < 12) {
d.month += 1;
} else {
d.month = 1;
if (d.year < std::numeric_limits<int16_t>::max()) {
d.year += 1;
} else {
return v.getDate();
}
}
}
return Value(d);
}
case Value::Type::TIME: {
auto t = v.getTime();
// Ignore the time zone.
if (t.microsec < 999999) {
t.microsec = t.microsec + 1;
} else {
t.microsec = 0;
if (t.sec < 59) {
t.sec += 1;
} else {
t.sec = 0;
if (t.minute < 59) {
t.minute += 1;
} else {
t.minute = 0;
if (t.hour < 23) {
t.hour += 1;
} else {
return v.getTime();
}
}
}
}
return Value(t);
}
case Value::Type::DATETIME: {
auto dt = v.getDateTime();
// Ignore the time zone.
if (dt.microsec < 999999) {
dt.microsec = dt.microsec + 1;
} else {
dt.microsec = 0;
if (dt.sec < 59) {
dt.sec += 1;
} else {
dt.sec = 0;
if (dt.minute < 59) {
dt.minute += 1;
} else {
dt.minute = 0;
if (dt.hour < 23) {
dt.hour += 1;
} else {
dt.hour = 0;
if (dt.day < 31) {
dt.day += 1;
} else {
dt.day = 1;
if (dt.month < 12) {
dt.month += 1;
} else {
dt.month = 1;
if (dt.year < std::numeric_limits<int16_t>::max()) {
dt.year += 1;
} else {
return v.getDateTime();
}
}
}
}
}
}
}
return Value(dt);
}
case Value::Type::__EMPTY__:
case Value::Type::BOOL:
case Value::Type::NULLVALUE:
case Value::Type::VERTEX:
case Value::Type::EDGE:
case Value::Type::LIST:
case Value::Type::SET:
case Value::Type::MAP:
case Value::Type::DATASET:
case Value::Type::GEOGRAPHY: // TODO(jie)
case Value::Type::PATH: {
DLOG(FATAL) << "Not supported value type " << type << "for index.";
return Value::kNullBadType;
}
}
DLOG(FATAL) << "Unknown value type " << static_cast<int>(type);
return Value::kNullBadType;
}
Value OptimizerUtils::boundValueWithLT(const meta::cpp2::ColumnDef& col, const Value& v) {
auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type());
switch (type) {
case Value::Type::INT: {
if (v.getInt() == std::numeric_limits<int64_t>::min()) {
return v;
} else {
return v - 1;
}
}
case Value::Type::FLOAT: {
if (v.getFloat() < 0.0) {
if (v.getFloat() == -std::numeric_limits<double_t>::max()) {
return v;
} else if (v.getFloat() == -std::numeric_limits<double_t>::min()) {
return Value(0.0);
}
} else if (v.getFloat() == 0.0) {
return Value(-std::numeric_limits<double_t>::min());
}
return v.getFloat() - kEpsilon;
}
case Value::Type::STRING: {
if (!col.type.type_length_ref().has_value()) {
return Value::kNullBadType;
}
std::vector<unsigned char> bytes(v.getStr().begin(), v.getStr().end());
bytes.resize(*col.get_type().type_length_ref());
for (size_t i = bytes.size();; i--) {
if (i > 0) {
if (bytes[i - 1]-- != 0) break;
} else {
return Value(std::string(*col.get_type().type_length_ref(), '\0'));
}
}
return Value(std::string(bytes.begin(), bytes.end()));
}
case Value::Type::DATE: {
if (Date() == v.getDate()) {
return v.getDate();
}
auto d = v.getDate();
if (d.day > 1) {
d.day -= 1;
} else {
d.day = 31;
if (d.month > 1) {
d.month -= 1;
} else {
d.month = 12;
if (d.year > 1) {
d.year -= 1;
} else {
return v.getDate();
}
}
}
return Value(d);
}
case Value::Type::TIME: {
if (Time() == v.getTime()) {
return v.getTime();
}
auto t = v.getTime();
if (t.microsec >= 1) {
t.microsec -= 1;
} else {
t.microsec = 999999;
if (t.sec >= 1) {
t.sec -= 1;
} else {
t.sec = 59;
if (t.minute >= 1) {
t.minute -= 1;
} else {
t.minute = 59;
if (t.hour >= 1) {
t.hour -= 1;
} else {
return v.getTime();
}
}
}
}
return Value(t);
}
case Value::Type::DATETIME: {
if (DateTime() == v.getDateTime()) {
return v.getDateTime();
}
auto dt = v.getDateTime();
if (dt.microsec >= 1) {
dt.microsec -= 1;
} else {
dt.microsec = 999999;
if (dt.sec >= 1) {
dt.sec -= 1;
} else {
dt.sec = 59;
if (dt.minute >= 1) {
dt.minute -= 1;
} else {
dt.minute = 59;
if (dt.hour >= 1) {
dt.hour -= 1;
} else {
dt.hour = 23;
if (dt.day > 1) {
dt.day -= 1;
} else {
dt.day = 31;
if (dt.month > 1) {
dt.month -= 1;
} else {
dt.month = 12;
if (dt.year > 1) {
dt.year -= 1;
} else {
return v.getDateTime();
}
}
}
}
}
}
}
return Value(dt);
}
case Value::Type::__EMPTY__:
case Value::Type::BOOL:
case Value::Type::NULLVALUE:
case Value::Type::VERTEX:
case Value::Type::EDGE:
case Value::Type::LIST:
case Value::Type::SET:
case Value::Type::MAP:
case Value::Type::DATASET:
case Value::Type::GEOGRAPHY: // TODO(jie)
case Value::Type::PATH: {
DLOG(FATAL) << "Not supported value type " << type << "for index.";
return Value::kNullBadType;
}
}
DLOG(FATAL) << "Unknown value type " << static_cast<int>(type);
return Value::kNullBadType;
}
Value OptimizerUtils::boundValueWithMax(const meta::cpp2::ColumnDef& col) {
auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type());
switch (type) {
case Value::Type::INT: {
return Value(std::numeric_limits<int64_t>::max());
}
case Value::Type::FLOAT: {
return Value(std::numeric_limits<double>::max());
}
case Value::Type::STRING: {
if (!col.type.type_length_ref().has_value()) {
return Value::kNullBadType;
}
return Value(std::string(*col.get_type().type_length_ref(), '\377'));
}
case Value::Type::DATE: {
Date d;
d.year = std::numeric_limits<int16_t>::max();
d.month = 12;
d.day = 31;
return Value(d);
}
case Value::Type::TIME: {
Time dt;
dt.hour = 23;
dt.minute = 59;
dt.sec = 59;
dt.microsec = 999999;
return Value(dt);
}
case Value::Type::DATETIME: {
DateTime dt;
dt.year = std::numeric_limits<int16_t>::max();
dt.month = 12;
dt.day = 31;
dt.hour = 23;
dt.minute = 59;
dt.sec = 59;
dt.microsec = 999999;
return Value(dt);
}
case Value::Type::__EMPTY__:
case Value::Type::BOOL:
case Value::Type::NULLVALUE:
case Value::Type::VERTEX:
case Value::Type::EDGE:
case Value::Type::LIST:
case Value::Type::SET:
case Value::Type::MAP:
case Value::Type::DATASET:
case Value::Type::GEOGRAPHY: // TODO(jie)
case Value::Type::PATH: {
DLOG(FATAL) << "Not supported value type " << type << "for index.";
return Value::kNullBadType;
}
}
DLOG(FATAL) << "Unknown value type " << static_cast<int>(type);
return Value::kNullBadType;
}
Value OptimizerUtils::boundValueWithMin(const meta::cpp2::ColumnDef& col) {
auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type());
switch (type) {
case Value::Type::INT: {
return Value(std::numeric_limits<int64_t>::min());
}
case Value::Type::FLOAT: {
return Value(-std::numeric_limits<double>::max());
}
case Value::Type::STRING: {
if (!col.type.type_length_ref().has_value()) {
return Value::kNullBadType;
}
return Value(std::string(*col.get_type().type_length_ref(), '\0'));
}
case Value::Type::DATE: {
return Value(Date());
}
case Value::Type::TIME: {
return Value(Time());
}
case Value::Type::DATETIME: {
return Value(DateTime());
}
case Value::Type::__EMPTY__:
case Value::Type::BOOL:
case Value::Type::NULLVALUE:
case Value::Type::VERTEX:
case Value::Type::EDGE:
case Value::Type::LIST:
case Value::Type::SET:
case Value::Type::MAP:
case Value::Type::DATASET:
case Value::Type::GEOGRAPHY: // TODO(jie)
case Value::Type::PATH: {
DLOG(FATAL) << "Not supported value type " << type << "for index.";
return Value::kNullBadType;
}
}
DLOG(FATAL) << "Unknown value type " << static_cast<int>(type);
return Value::kNullBadType;
}
Value OptimizerUtils::normalizeValue(const meta::cpp2::ColumnDef& col, const Value& v) {
auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type());
switch (type) {
case Value::Type::INT:
case Value::Type::FLOAT:
case Value::Type::BOOL:
case Value::Type::DATE:
case Value::Type::TIME:
case Value::Type::DATETIME: {
return v;
}
case Value::Type::STRING: {
if (!col.type.type_length_ref().has_value()) {
return Value::kNullBadType;
}
return v;
}
case Value::Type::__EMPTY__:
case Value::Type::NULLVALUE:
case Value::Type::VERTEX:
case Value::Type::EDGE:
case Value::Type::LIST:
case Value::Type::SET:
case Value::Type::MAP:
case Value::Type::DATASET:
case Value::Type::GEOGRAPHY: // TODO(jie)
case Value::Type::PATH: {
DLOG(FATAL) << "Not supported value type " << type << "for index.";
return Value::kNullBadType;
}
}
DLOG(FATAL) << "Unknown value type " << static_cast<int>(type);
return Value::kNullBadType;
}
Status OptimizerUtils::boundValue(Expression::Kind kind,
const Value& val,
const meta::cpp2::ColumnDef& col,
Value& begin,
Value& end) {
if (val.type() != graph::SchemaUtil::propTypeToValueType(col.type.type)) {
return Status::SemanticError("Data type error of field : %s", col.get_name().c_str());
}
switch (kind) {
case Expression::Kind::kRelLE: {
// if c1 <= int(5) , the range pair should be (min, 6)
// if c1 < int(5), the range pair should be (min, 5)
auto v = OptimizerUtils::boundValue(col, BoundValueOperator::GREATER_THAN, val);
if (v == Value::kNullBadType) {
LOG(ERROR) << "Get bound value error. field : " << col.get_name();
return Status::Error("Get bound value error. field : %s", col.get_name().c_str());
}
// where c <= 1 and c <= 2 , 1 should be valid.
if (end.empty()) {
end = v;
} else {
end = v < end ? v : end;
}
break;
}
case Expression::Kind::kRelGE: {
// where c >= 1 and c >= 2 , 2 should be valid.
if (begin.empty()) {
begin = val;
} else {
begin = val < begin ? begin : val;
}
break;
}
case Expression::Kind::kRelLT: {
// c < 5 and c < 6 , 5 should be valid.
if (end.empty()) {
end = val;
} else {
end = val < end ? val : end;
}
break;
}
case Expression::Kind::kRelGT: {
// if c >= 5, the range pair should be (5, max)
// if c > 5, the range pair should be (6, max)
auto v = OptimizerUtils::boundValue(col, BoundValueOperator::GREATER_THAN, val);
if (v == Value::kNullBadType) {
LOG(ERROR) << "Get bound value error. field : " << col.get_name();
return Status::Error("Get bound value error. field : %s", col.get_name().c_str());
}
// where c > 1 and c > 2 , 2 should be valid.
if (begin.empty()) {
begin = v;
} else {
begin = v < begin ? begin : v;
}
break;
}
default: {
// TODO(yee): Semantic error
return Status::Error("Invalid expression kind.");
}
}
return Status::OK();
}
namespace {
// IndexScore is used to find the optimal index. The larger the score, the
// better the index. When it is a score sequence, the length of the sequence
// should also be considered, such as: {2, 1, 0} > {2, 1} > {2, 0, 1} > {2, 0} >
// {2} > {1, 2} > {1, 1} > {1}
enum class IndexScore : uint8_t {
kNotEqual = 0,
kRange = 1,
kPrefix = 2,
};
struct ScoredColumnHint {
storage::cpp2::IndexColumnHint hint;
IndexScore score;
};
struct IndexResult {
const meta::cpp2::IndexItem* index;
// expressions not used in all `ScoredColumnHint'
std::vector<const Expression*> unusedExprs;
std::vector<ScoredColumnHint> hints;
bool operator<(const IndexResult& rhs) const {
if (hints.empty()) return true;
auto sz = std::min(hints.size(), rhs.hints.size());
for (size_t i = 0; i < sz; i++) {
if (hints[i].score < rhs.hints[i].score) {
return true;
}
if (hints[i].score > rhs.hints[i].score) {
return false;
}
}
return hints.size() < rhs.hints.size();
}
};
Status checkValue(const ColumnDef& field, BVO bvo, Value* value) {
if (value->empty()) {
*value = OptimizerUtils::boundValue(field, bvo, Value());
if (value->isBadNull()) {
return Status::Error("Get bound value error. field : %s", field.get_name().c_str());
}
}
return Status::OK();
}
Status handleRangeIndex(const meta::cpp2::ColumnDef& field,
const Expression* expr,
const Value& value,
IndexColumnHint* hint) {
if (field.get_type().get_type() == nebula::cpp2::PropertyType::BOOL) {
return Status::Error("Range scan for bool type is illegal");
}
Value begin, end;
NG_RETURN_IF_ERROR(OptimizerUtils::boundValue(expr->kind(), value, field, begin, end));
NG_RETURN_IF_ERROR(checkValue(field, BVO::MIN, &begin));
NG_RETURN_IF_ERROR(checkValue(field, BVO::MAX, &end));
hint->set_begin_value(std::move(begin));
hint->set_end_value(std::move(end));
hint->set_scan_type(storage::cpp2::ScanType::RANGE);
hint->set_column_name(field.get_name());
return Status::OK();
}
void handleEqualIndex(const ColumnDef& field, const Value& value, IndexColumnHint* hint) {
hint->set_scan_type(storage::cpp2::ScanType::PREFIX);
hint->set_column_name(field.get_name());
hint->set_begin_value(OptimizerUtils::normalizeValue(field, value));
}
StatusOr<ScoredColumnHint> selectRelExprIndex(const ColumnDef& field,
const RelationalExpression* expr) {
// TODO(yee): Reverse expression
auto left = expr->left();
DCHECK(left->kind() == Expression::Kind::kEdgeProperty ||
left->kind() == Expression::Kind::kTagProperty);
auto propExpr = static_cast<const PropertyExpression*>(left);
if (propExpr->prop() != field.get_name()) {
return Status::Error("Invalid field name.");
}
auto right = expr->right();
if (expr->kind() == Expression::Kind::kRelIn) { // container expressions
DCHECK(right->isContainerExpr());
} else { // other expressions
DCHECK(right->kind() == Expression::Kind::kConstant);
}
const auto& value = static_cast<const ConstantExpression*>(right)->value();
ScoredColumnHint hint;
switch (expr->kind()) {
case Expression::Kind::kRelEQ: {
handleEqualIndex(field, value, &hint.hint);
hint.score = IndexScore::kPrefix;
break;
}
case Expression::Kind::kRelGE:
case Expression::Kind::kRelGT:
case Expression::Kind::kRelLE:
case Expression::Kind::kRelLT: {
NG_RETURN_IF_ERROR(handleRangeIndex(field, expr, value, &hint.hint));
hint.score = IndexScore::kRange;
break;
}
case Expression::Kind::kRelNE: {
hint.score = IndexScore::kNotEqual;
break;
}
default: {
return Status::Error("Invalid expression kind");
}
}
return hint;
}
StatusOr<IndexResult> selectRelExprIndex(const RelationalExpression* expr, const IndexItem& index) {
const auto& fields = index.get_fields();
if (fields.empty()) {
return Status::Error("Index(%s) does not have any fields.", index.get_index_name().c_str());
}
auto status = selectRelExprIndex(fields[0], expr);
NG_RETURN_IF_ERROR(status);
IndexResult result;
result.hints.emplace_back(std::move(status).value());
result.index = &index;
return result;
}
bool mergeRangeColumnHints(const ColumnDef& field,
const std::vector<ScoredColumnHint>& hints,
Value* begin,
Value* end) {
for (auto& h : hints) {
switch (h.score) {
case IndexScore::kRange: {
if (h.hint.begin_value_ref().is_set()) {
const auto& value = h.hint.get_begin_value();
if (begin->empty() || *begin < value) {
*begin = value;
}
}
if (h.hint.end_value_ref().is_set()) {
const auto& value = h.hint.get_end_value();
if (end->empty() || *end > value) {
*end = value;
}
}
break;
}
case IndexScore::kPrefix: {
// Prefix value <=> range [value, value]
const auto& value = h.hint.get_begin_value();
Value b, e;
auto status = OptimizerUtils::boundValue(ExprKind::kRelGE, value, field, b, e);
if (!status.ok()) return false;
if (begin->empty() || *begin < b) {
*begin = b;
}
status = OptimizerUtils::boundValue(ExprKind::kRelLE, value, field, b, e);
if (!status.ok()) return false;
if (end->empty() || *end > e) {
*end = e;
}
break;
}
case IndexScore::kNotEqual: {
return false;
}
}
}
return !(*begin >= *end);
}
bool getIndexColumnHintInExpr(const ColumnDef& field,
const LogicalExpression* expr,
ScoredColumnHint* hint,
std::vector<Expression*>* operands) {
std::vector<ScoredColumnHint> hints;
for (auto& operand : expr->operands()) {
if (!operand->isRelExpr()) continue;
auto relExpr = static_cast<const RelationalExpression*>(operand);
auto status = selectRelExprIndex(field, relExpr);
if (status.ok()) {
hints.emplace_back(std::move(status).value());
operands->emplace_back(operand);
}
}
if (hints.empty()) return false;
if (hints.size() == 1) {
*hint = hints.front();
return true;
}
Value begin, end;
if (!mergeRangeColumnHints(field, hints, &begin, &end)) {
return false;
}
ScoredColumnHint h;
h.hint.set_column_name(field.get_name());
// Change scan type to prefix if begin + 1 == end
Value newBegin, newEnd;
auto status = OptimizerUtils::boundValue(ExprKind::kRelGT, begin, field, newBegin, newEnd);
if (!status.ok()) {
// TODO(yee): differentiate between empty set and invalid index to use
return false;
}
if (newBegin < end) {
// end > newBegin > begin
h.hint.set_scan_type(storage::cpp2::ScanType::RANGE);
h.hint.set_begin_value(std::move(begin));
h.hint.set_end_value(std::move(end));
h.score = IndexScore::kRange;
} else if (newBegin == end) {
// end == neBegin == begin + 1
h.hint.set_scan_type(storage::cpp2::ScanType::PREFIX);
h.hint.set_begin_value(std::move(begin));
h.score = IndexScore::kPrefix;
} else {
return false;
}
*hint = std::move(h);
return true;
}
std::vector<const Expression*> collectUnusedExpr(
const LogicalExpression* expr, const std::unordered_set<const Expression*>& usedOperands) {
std::vector<const Expression*> unusedOperands;
for (auto& operand : expr->operands()) {
auto iter = std::find(usedOperands.begin(), usedOperands.end(), operand);
if (iter == usedOperands.end()) {
unusedOperands.emplace_back(operand);
}
}
return unusedOperands;
}
StatusOr<IndexResult> selectLogicalExprIndex(const LogicalExpression* expr,
const IndexItem& index) {
if (expr->kind() != Expression::Kind::kLogicalAnd) {
return Status::Error("Invalid expression kind.");
}
IndexResult result;
result.hints.reserve(index.get_fields().size());
std::unordered_set<const Expression*> usedOperands;
for (auto& field : index.get_fields()) {
ScoredColumnHint hint;
std::vector<Expression*> operands;
if (!getIndexColumnHintInExpr(field, expr, &hint, &operands)) {
break;
}
result.hints.emplace_back(std::move(hint));
for (auto op : operands) {
usedOperands.insert(op);
}
}
if (result.hints.empty()) {
return Status::Error("There is not index to use.");
}
result.unusedExprs = collectUnusedExpr(expr, usedOperands);
result.index = &index;
return result;
}
StatusOr<IndexResult> selectIndex(const Expression* expr, const IndexItem& index) {
if (expr->isRelExpr()) {
return selectRelExprIndex(static_cast<const RelationalExpression*>(expr), index);
}
if (expr->isLogicalExpr()) {
return selectLogicalExprIndex(static_cast<const LogicalExpression*>(expr), index);
}
return Status::Error("Invalid expression kind.");
}
} // namespace
void OptimizerUtils::eraseInvalidIndexItems(
int32_t schemaId, std::vector<std::shared_ptr<nebula::meta::cpp2::IndexItem>>* indexItems) {
// Erase invalid index items
for (auto iter = indexItems->begin(); iter != indexItems->end();) {
auto schema = (*iter)->get_schema_id();
if (schema.tag_id_ref().has_value() && schema.get_tag_id() != schemaId) {
iter = indexItems->erase(iter);
} else if (schema.edge_type_ref().has_value() && schema.get_edge_type() != schemaId) {
iter = indexItems->erase(iter);
} else {
iter++;
}
}
}
bool OptimizerUtils::findOptimalIndex(const Expression* condition,
const std::vector<std::shared_ptr<IndexItem>>& indexItems,
bool* isPrefixScan,
IndexQueryContext* ictx) {
// Return directly if there is no valid index to use.
if (indexItems.empty()) {
return false;
}
std::vector<IndexResult> results;
for (auto& index : indexItems) {
auto resStatus = selectIndex(condition, *index);
if (resStatus.ok()) {
results.emplace_back(std::move(resStatus).value());
}
}
if (results.empty()) {
return false;
}
std::sort(results.begin(), results.end());
auto& index = results.back();
if (index.hints.empty()) {
return false;
}
*isPrefixScan = false;
std::vector<storage::cpp2::IndexColumnHint> hints;
hints.reserve(index.hints.size());
auto iter = index.hints.begin();
// Use full scan if the highest index score is NotEqual
if (iter->score == IndexScore::kNotEqual) {
return false;
}
for (; iter != index.hints.end(); ++iter) {
auto& hint = *iter;
if (hint.score == IndexScore::kPrefix) {
hints.emplace_back(std::move(hint.hint));
*isPrefixScan = true;
continue;
}
if (hint.score == IndexScore::kRange) {
hints.emplace_back(std::move(hint.hint));
// skip the case first range hint is the last hint
// when set filter in index query context
++iter;
}
break;
}
// The filter can always be pushed down for lookup query
if (iter != index.hints.end() || !index.unusedExprs.empty()) {
ictx->set_filter(condition->encode());
}
ictx->set_index_id(index.index->get_index_id());
ictx->set_column_hints(std::move(hints));
return true;
}
// Check if the relational expression has a valid index
// The left operand should either be a kEdgeProperty or kTagProperty expr
bool OptimizerUtils::relExprHasIndex(
const Expression* expr,
const std::vector<std::shared_ptr<nebula::meta::cpp2::IndexItem>>& indexItems) {
DCHECK(expr->isRelExpr());
for (auto& index : indexItems) {
const auto& fields = index->get_fields();
if (fields.empty()) {
return false;
}
auto left = static_cast<const RelationalExpression*>(expr)->left();
DCHECK(left->kind() == Expression::Kind::kEdgeProperty ||
left->kind() == Expression::Kind::kTagProperty);
auto propExpr = static_cast<const PropertyExpression*>(left);
if (propExpr->prop() == fields[0].get_name()) {
return true;
}
}
return false;
}
void OptimizerUtils::copyIndexScanData(const nebula::graph::IndexScan* from,
nebula::graph::IndexScan* to) {
to->setEmptyResultSet(from->isEmptyResultSet());
to->setSpace(from->space());
to->setReturnCols(from->returnColumns());
to->setIsEdge(from->isEdge());
to->setSchemaId(from->schemaId());
to->setDedup(from->dedup());
to->setOrderBy(from->orderBy());
to->setLimit(from->limit());
to->setFilter(from->filter() == nullptr ? nullptr : from->filter()->clone());
}
} // namespace graph
} // namespace nebula
| 1 | 31,923 | This is so weird as a utility function. | vesoft-inc-nebula | cpp |
@@ -58,7 +58,8 @@ func TestPlanQuery(t *testing.T) {
return m
}
- cmpopt := cmp.AllowUnexported(dynamodb.ScanInput{}, dynamodb.QueryInput{}, dynamodb.AttributeValue{})
+ // Ignores the ConsistentRead field from both QueryInput and ScanInput.
+ cmpopt := cmp.FilterPath(func(p cmp.Path) bool { return p.Last().String() == ".ConsistentRead" }, cmp.Ignore())
for _, test := range []struct {
desc string | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awsdynamodb
import (
"fmt"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/google/go-cmp/cmp"
"gocloud.dev/docstore/driver"
"gocloud.dev/docstore/drivertest"
)
func TestPlanQuery(t *testing.T) {
c := &collection{
table: "T",
partitionKey: "tableP",
description: &dynamodb.TableDescription{},
opts: &Options{AllowScans: true, RevisionField: "rev"},
}
// Build an ExpressionAttributeNames map with the given names.
eans := func(names ...string) map[string]*string {
m := map[string]*string{}
for i, n := range names {
m[fmt.Sprintf("#%d", i)] = aws.String(n)
}
return m
}
// Build an ExpressionAttributeValues map. Filter values are always the number 1
// and the keys are always :0, :1, ..., so we only need to know how many entries.
eavs := func(n int) map[string]*dynamodb.AttributeValue {
if n == 0 {
return nil
}
one := new(dynamodb.AttributeValue).SetN("1")
m := map[string]*dynamodb.AttributeValue{}
for i := 0; i < n; i++ {
m[fmt.Sprintf(":%d", i)] = one
}
return m
}
cmpopt := cmp.AllowUnexported(dynamodb.ScanInput{}, dynamodb.QueryInput{}, dynamodb.AttributeValue{})
for _, test := range []struct {
desc string
// In all cases, the table has a partition key called "tableP".
tableSortKey string // if non-empty, the table sort key
localIndexSortKey string // if non-empty, there is a local index with this sort key
localIndexFields []string // the fields projected into the local index
globalIndexPartitionKey string // if non-empty, there is a global index with this partition key
globalIndexSortKey string // if non-empty, the global index has this sort key
globalIndexFields []string // the fields projected into the global index
query *driver.Query
want interface{} // either a ScanInput or a QueryInput
wantPlan string
}{
{
desc: "empty query",
// A query with no filters requires a scan.
query: &driver.Query{},
want: &dynamodb.ScanInput{TableName: &c.table},
wantPlan: "Scan",
},
{
desc: "equality filter on table partition field",
// A filter that compares the table's partition key for equality is the minimum
// requirement for querying the table.
query: &driver.Query{Filters: []driver.Filter{{[]string{"tableP"}, "=", 1}}},
want: &dynamodb.QueryInput{
KeyConditionExpression: aws.String("#0 = :0"),
ExpressionAttributeNames: eans("tableP"),
ExpressionAttributeValues: eavs(1),
},
wantPlan: "Table",
},
{
desc: "equality filter on table partition field (sort key)",
// Same as above, but the table has a sort key; shouldn't make a difference.
tableSortKey: "tableS",
query: &driver.Query{Filters: []driver.Filter{{[]string{"tableP"}, "=", 1}}},
want: &dynamodb.QueryInput{
KeyConditionExpression: aws.String("#0 = :0"),
ExpressionAttributeNames: eans("tableP"),
ExpressionAttributeValues: eavs(1),
},
wantPlan: "Table",
},
{
desc: "equality filter on other field",
// This query has an equality filter, but not on the table's partition key.
// Since there are no matching indexes, we must scan.
query: &driver.Query{Filters: []driver.Filter{{[]string{"other"}, "=", 1}}},
want: &dynamodb.ScanInput{
FilterExpression: aws.String("#0 = :0"),
ExpressionAttributeNames: eans("other"),
ExpressionAttributeValues: eavs(1),
},
wantPlan: "Scan",
},
{
desc: "non-equality filter on table partition field",
// If the query doesn't have an equality filter on the partition key, and there
// are no indexes, we must scan. The filter becomes a FilterExpression, evaluated
// on the backend.
query: &driver.Query{Filters: []driver.Filter{{[]string{"tableP"}, ">", 1}}},
want: &dynamodb.ScanInput{
FilterExpression: aws.String("#0 > :0"),
ExpressionAttributeNames: eans("tableP"),
ExpressionAttributeValues: eavs(1),
},
wantPlan: "Scan",
},
{
desc: "equality filter on partition, filter on other",
// The equality filter on the table's partition key lets us query the table.
// The other filter is used in the filter expression.
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"other"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
KeyConditionExpression: aws.String("#1 = :1"),
FilterExpression: aws.String("#0 <= :0"),
ExpressionAttributeNames: eans("other", "tableP"),
ExpressionAttributeValues: eavs(2),
},
wantPlan: "Table",
},
{
desc: "equality filter on partition, filter on sort",
// If the table has a sort key and the query has a filter on it as well
// as an equality filter on the table's partition key, we can query the
// table.
tableSortKey: "tableS",
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"tableS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
KeyConditionExpression: aws.String("(#0 = :0) AND (#1 <= :1)"),
ExpressionAttributeNames: eans("tableP", "tableS"),
ExpressionAttributeValues: eavs(2),
},
wantPlan: "Table",
},
{
desc: "equality filter on table partition, filter on local index sort",
// The equality filter on the table's partition key allows us to query
// the table, but there is a better choice: a local index with a sort key
// that is mentioned in the query.
localIndexSortKey: "localS",
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"localS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
IndexName: aws.String("local"),
KeyConditionExpression: aws.String("(#0 = :0) AND (#1 <= :1)"),
ExpressionAttributeNames: eans("tableP", "localS"),
},
wantPlan: `Index: "local"`,
},
{
desc: "equality filter on table partition, filter on local index sort, bad projection",
// The equality filter on the table's partition key allows us to query
// the table. There seems to be a better choice: a local index with a sort key
// that is mentioned in the query. But the query wants the entire document,
// and the local index only has some fields.
localIndexSortKey: "localS",
localIndexFields: []string{}, // keys only
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"localS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
KeyConditionExpression: aws.String("#1 = :1"),
FilterExpression: aws.String("#0 <= :0"),
ExpressionAttributeNames: eans("localS", "tableP"),
},
wantPlan: "Table",
},
{
desc: "equality filter on table partition, filter on local index sort, good projection",
// Same as above, but now the query no longer asks for all fields, so
// we will only read the requested fields from the table.
localIndexSortKey: "localS",
localIndexFields: []string{}, // keys only
query: &driver.Query{
FieldPaths: [][]string{{"tableP"}, {"localS"}},
Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"localS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
IndexName: aws.String("local"),
KeyConditionExpression: aws.String("(#0 = :0) AND (#1 <= :1)"),
ExpressionAttributeNames: eans("tableP", "localS"),
ExpressionAttributeValues: eavs(2),
ProjectionExpression: aws.String("#0, #1"),
},
wantPlan: `Index: "local"`,
},
{
desc: "equality filter on table partition, filters on local index and table sort",
// Given the choice of querying the table or a local index, prefer the table.
tableSortKey: "tableS",
localIndexSortKey: "localS",
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"localS"}, "<=", 1},
{[]string{"tableS"}, ">", 1},
}},
want: &dynamodb.QueryInput{
IndexName: nil,
KeyConditionExpression: aws.String("(#1 = :1) AND (#2 > :2)"),
FilterExpression: aws.String("#0 <= :0"),
ExpressionAttributeNames: eans("localS", "tableP", "tableS"),
},
wantPlan: "Table",
},
{
desc: "equality filter on other field with index",
// The query is the same as in "equality filter on other field," but now there
// is a global index with that field as partition key, so we can query it.
globalIndexPartitionKey: "other",
query: &driver.Query{Filters: []driver.Filter{{[]string{"other"}, "=", 1}}},
want: &dynamodb.QueryInput{
IndexName: aws.String("global"),
KeyConditionExpression: aws.String("#0 = :0"),
ExpressionAttributeNames: eans("other"),
},
wantPlan: `Index: "global"`,
},
{
desc: "equality filter on table partition, filter on global index sort",
// The equality filter on the table's partition key allows us to query
// the table, but there is a better choice: a global index with the same
// partition key and a sort key that is mentioned in the query.
// (In these tests, the global index has all the fields of the table by default.)
globalIndexPartitionKey: "tableP",
globalIndexSortKey: "globalS",
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"globalS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
IndexName: aws.String("global"),
KeyConditionExpression: aws.String("(#0 = :0) AND (#1 <= :1)"),
ExpressionAttributeNames: eans("tableP", "globalS"),
},
wantPlan: `Index: "global"`,
},
{
desc: "equality filter on table partition, filter on global index sort, bad projection",
// Although there is a global index that matches the filters best, it doesn't
// have the necessary fields. So we query against the table.
// The query does not specify FilterPaths, so it retrieves the entire document.
// globalIndexFields explicitly lists the fields that the global index has.
// Since the global index does not have all the document fields, it can't be used.
globalIndexPartitionKey: "tableP",
globalIndexSortKey: "globalS",
globalIndexFields: []string{"other"},
query: &driver.Query{Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"globalS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
IndexName: nil,
KeyConditionExpression: aws.String("#1 = :1"),
FilterExpression: aws.String("#0 <= :0"),
ExpressionAttributeNames: eans("globalS", "tableP"),
},
wantPlan: "Table",
},
{
desc: "equality filter on table partition, filter on global index sort, good projection",
// The global index matches the filters best and has the necessary
// fields. So we query against it.
globalIndexPartitionKey: "tableP",
globalIndexSortKey: "globalS",
globalIndexFields: []string{"other", "rev"},
query: &driver.Query{
FieldPaths: [][]string{{"other"}},
Filters: []driver.Filter{
{[]string{"tableP"}, "=", 1},
{[]string{"globalS"}, "<=", 1},
}},
want: &dynamodb.QueryInput{
IndexName: aws.String("global"),
KeyConditionExpression: aws.String("(#0 = :0) AND (#1 <= :1)"),
ProjectionExpression: aws.String("#2, #0"),
ExpressionAttributeNames: eans("tableP", "globalS", "other"),
ExpressionAttributeValues: eavs(2),
},
wantPlan: `Index: "global"`,
},
} {
t.Run(test.desc, func(t *testing.T) {
c.sortKey = test.tableSortKey
if test.localIndexSortKey == "" {
c.description.LocalSecondaryIndexes = nil
} else {
c.description.LocalSecondaryIndexes = []*dynamodb.LocalSecondaryIndexDescription{
{
IndexName: aws.String("local"),
KeySchema: keySchema("tableP", test.localIndexSortKey),
Projection: indexProjection(test.localIndexFields),
},
}
}
if test.globalIndexPartitionKey == "" {
c.description.GlobalSecondaryIndexes = nil
} else {
c.description.GlobalSecondaryIndexes = []*dynamodb.GlobalSecondaryIndexDescription{
{
IndexName: aws.String("global"),
KeySchema: keySchema(test.globalIndexPartitionKey, test.globalIndexSortKey),
Projection: indexProjection(test.globalIndexFields),
},
}
}
gotRunner, err := c.planQuery(test.query)
if err != nil {
t.Fatal(err)
}
var got interface{}
switch tw := test.want.(type) {
case *dynamodb.ScanInput:
got = gotRunner.scanIn
tw.TableName = &c.table
if tw.ExpressionAttributeValues == nil {
tw.ExpressionAttributeValues = eavs(len(tw.ExpressionAttributeNames))
}
case *dynamodb.QueryInput:
got = gotRunner.queryIn
tw.TableName = &c.table
if tw.ExpressionAttributeValues == nil {
tw.ExpressionAttributeValues = eavs(len(tw.ExpressionAttributeNames))
}
default:
t.Fatalf("bad type for test.want: %T", test.want)
}
if diff := cmp.Diff(got, test.want, cmpopt); diff != "" {
t.Error("input:\n", diff)
}
gotPlan := gotRunner.queryPlan()
if diff := cmp.Diff(gotPlan, test.wantPlan); diff != "" {
t.Error("plan:\n", diff)
}
})
}
}
func TestQueryNoScans(t *testing.T) {
c := &collection{
table: "T",
partitionKey: "tableP",
description: &dynamodb.TableDescription{},
opts: &Options{AllowScans: false},
}
for _, test := range []struct {
q *driver.Query
wantErr bool
}{
{&driver.Query{}, false},
{&driver.Query{Filters: []driver.Filter{{[]string{"other"}, "=", 1}}}, true},
} {
qr, err := c.planQuery(test.q)
if err != nil {
t.Fatalf("%v: %v", test.q, err)
}
err = c.checkPlan(qr)
if test.wantErr {
if err == nil || !strings.Contains(err.Error(), "AllowScans") {
t.Errorf("%v: got %v, want an error that mentions the AllowScans option", test.q, err)
}
} else if err != nil {
t.Errorf("%v: got %v, want nil", test.q, err)
}
}
}
// Make a key schema from the names of the partition and sort keys.
func keySchema(pkey, skey string) []*dynamodb.KeySchemaElement {
return []*dynamodb.KeySchemaElement{
{AttributeName: &pkey, KeyType: aws.String("HASH")},
{AttributeName: &skey, KeyType: aws.String("RANGE")},
}
}
func indexProjection(fields []string) *dynamodb.Projection {
var ptype string
switch {
case fields == nil:
ptype = "ALL"
case len(fields) == 0:
ptype = "KEYS_ONLY"
default:
ptype = "INCLUDE"
}
proj := &dynamodb.Projection{ProjectionType: &ptype}
for _, f := range fields {
f := f
proj.NonKeyAttributes = append(proj.NonKeyAttributes, &f)
}
return proj
}
func TestGlobalFieldsIncluded(t *testing.T) {
c := &collection{partitionKey: "tableP", sortKey: "tableS"}
gi := &dynamodb.GlobalSecondaryIndexDescription{
KeySchema: keySchema("globalP", "globalS"),
}
for _, test := range []struct {
desc string
queryFields []string
wantKeysOnly bool // when the projection includes only table and index keys
wantInclude bool // when the projection includes fields "f" and "g".
}{
{
desc: "all",
queryFields: nil,
wantKeysOnly: false,
wantInclude: false,
},
{
desc: "key fields",
queryFields: []string{"tableS", "globalP"},
wantKeysOnly: true,
wantInclude: true,
},
{
desc: "included fields",
queryFields: []string{"f", "g"},
wantKeysOnly: false,
wantInclude: true,
},
{
desc: "included and key fields",
queryFields: []string{"f", "g", "tableP", "globalS"},
wantKeysOnly: false,
wantInclude: true,
},
{
desc: "not included field",
queryFields: []string{"f", "g", "h"},
wantKeysOnly: false,
wantInclude: false,
},
} {
t.Run(test.desc, func(t *testing.T) {
var fps [][]string
for _, qf := range test.queryFields {
fps = append(fps, strings.Split(qf, "."))
}
q := &driver.Query{FieldPaths: fps}
for _, p := range []struct {
name string
proj *dynamodb.Projection
want bool
}{
{"ALL", indexProjection(nil), true},
{"KEYS_ONLY", indexProjection([]string{}), test.wantKeysOnly},
{"INCLUDE", indexProjection([]string{"f", "g"}), test.wantInclude},
} {
t.Run(p.name, func(t *testing.T) {
gi.Projection = p.proj
got := c.globalFieldsIncluded(q, gi)
if got != p.want {
t.Errorf("got %t, want %t", got, p.want)
}
})
}
})
}
}
func TestCompare(t *testing.T) {
tm := time.Now()
for _, test := range []struct {
a, b interface{}
want int
}{
{1, 1, 0},
{1, 2, -1},
{2, 1, 1},
{1.5, 2, -1},
{2.5, 2.1, 1},
{3.8, 3.8, 0},
{"x", "x", 0},
{"x", "xx", -1},
{"x", "a", 1},
{tm, tm, 0},
{tm, tm.Add(1), -1},
{tm, tm.Add(-1), 1},
{[]byte("x"), []byte("x"), 0},
{[]byte("x"), []byte("xx"), -1},
{[]byte("x"), []byte("a"), 1},
} {
got := compare(test.a, test.b)
if got != test.want {
t.Errorf("compare(%v, %v) = %d, want %d", test.a, test.b, got, test.want)
}
}
}
func TestCopyTopLevel(t *testing.T) {
type E struct{ C int }
type S struct {
A int
B int
E
}
s := &S{A: 1, B: 2, E: E{C: 3}}
m := map[string]interface{}{"A": 1, "B": 2, "C": 3}
for _, test := range []struct {
dest, src interface{}
want interface{}
}{
{
dest: map[string]interface{}{},
src: m,
want: m,
},
{
dest: &S{},
src: s,
want: s,
},
{
dest: map[string]interface{}{},
src: s,
want: m,
},
{
dest: &S{},
src: m,
want: s,
},
} {
dest := drivertest.MustDocument(test.dest)
src := drivertest.MustDocument(test.src)
if err := copyTopLevel(dest, src); err != nil {
t.Fatalf("src=%+v: %v", test.src, err)
}
if !cmp.Equal(test.dest, test.want) {
t.Errorf("src=%+v: got %v, want %v", test.src, test.dest, test.want)
}
}
}
| 1 | 19,704 | Why? What would happen if you did not? | google-go-cloud | go |
@@ -752,7 +752,9 @@ public class SmartStore {
String[] pathElements = path.split("[.]");
Object o = soup;
for (String pathElement : pathElements) {
- o = ((JSONObject) o).opt(pathElement);
+ if (o != null) {
+ o = ((JSONObject) o).opt(pathElement);
+ }
}
return o;
} | 1 | /*
* Copyright (c) 2012, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import java.util.ArrayList;
import java.util.List;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import android.content.ContentValues;
import android.database.Cursor;
import android.text.TextUtils;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType;
/**
* Smart store
*
* Provides a secure means for SalesforceMobileSDK Container-based applications to store objects in a persistent
* and searchable manner. Similar in some ways to CouchDB, SmartStore stores documents as JSON values.
* SmartStore is inspired by the Apple Newton OS Soup/Store model.
* The main challenge here is how to effectively store documents with dynamic fields, and still allow indexing and searching.
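*
* A minimal usage sketch (illustrative only, not part of the original source; the IndexSpec
* constructor and the create() call are assumptions and may not match this exact version of the API):
* <pre>
* {@code
* SmartStore store = new SmartStore(db);
* store.registerSoup("accounts", new IndexSpec[] { new IndexSpec("name", Type.string) });
* JSONObject account = new JSONObject().put("name", "Acme");
* store.create("accounts", account); // the stored element gains _soupEntryId and _soupLastModifiedDate
* }
* </pre>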
*/
public class SmartStore {
// Default page size
public static final int DEFAULT_PAGE_SIZE = 10;
// Table to keep track of soup names
protected static final String SOUP_NAMES_TABLE = "soup_names";
// Table to keep track of soup's index specs
protected static final String SOUP_INDEX_MAP_TABLE = "soup_index_map";
// Columns of the soup index map table
protected static final String SOUP_NAME_COL = "soupName";
protected static final String PATH_COL = "path";
protected static final String COLUMN_NAME_COL = "columnName";
protected static final String COLUMN_TYPE_COL = "columnType";
// Predicates
protected static final String SOUP_NAME_PREDICATE = SOUP_NAME_COL + " = ?";
protected static final String PATH_PREDICATE = PATH_COL + " = ?";
// Columns of a soup table
protected static final String ID_COL = "id";
protected static final String CREATED_COL = "created";
protected static final String LAST_MODIFIED_COL = "lastModified";
protected static final String SOUP_COL = "soup";
// JSON fields added to soup element on insert/update
public static final String SOUP_ENTRY_ID = "_soupEntryId";
public static final String SOUP_LAST_MODIFIED_DATE = "_soupLastModifiedDate";
private static final String SOUP_ENTRY_ID_PREDICATE = ID_COL + " = ?";
// Backing database
protected SQLiteDatabase db;
/**
* Changes the encryption key on the smartstore.
*
* @param db Database object.
* @param newKey New encryption key.
*/
public static synchronized void changeKey(SQLiteDatabase db, String newKey) {
if (newKey != null && !newKey.trim().equals("")) {
db.execSQL("PRAGMA rekey = '" + newKey + "'");
}
}
/**
* Create soup index map table to keep track of soups' index specs
* Create soup name map table to keep track of soup name to table name mappings
* Called when the database is first created
*
     * @param db writable database in which to create the meta tables
*/
public static void createMetaTables(SQLiteDatabase db) {
// Create soup_index_map table
StringBuilder sb = new StringBuilder();
sb.append("CREATE TABLE ").append(SOUP_INDEX_MAP_TABLE).append(" (")
.append(SOUP_NAME_COL).append(" TEXT")
.append(",").append(PATH_COL).append(" TEXT")
.append(",").append(COLUMN_NAME_COL).append(" TEXT")
.append(",").append(COLUMN_TYPE_COL).append(" TEXT")
.append(")");
db.execSQL(sb.toString());
// Add index on soup_name column
db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_INDEX_MAP_TABLE + "_0", SOUP_INDEX_MAP_TABLE, SOUP_NAME_COL));
// Create soup_names table
// The table name for the soup will simply be table_<soupId>
sb = new StringBuilder();
sb.append("CREATE TABLE ").append(SOUP_NAMES_TABLE).append(" (")
.append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT")
.append(",").append(SOUP_NAME_COL).append(" TEXT")
.append(")");
db.execSQL(sb.toString());
// Add index on soup_name column
db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_NAMES_TABLE + "_0", SOUP_NAMES_TABLE, SOUP_NAME_COL));
}
/**
     * @param db database that backs this smart store
*/
public SmartStore(SQLiteDatabase db) {
this.db = db;
}
/**
* Start transaction
*/
public void beginTransaction() {
db.beginTransaction();
}
/**
* End transaction (commit or rollback)
*/
public void endTransaction() {
db.endTransaction();
}
/**
* Mark transaction as successful (next call to endTransaction will be a commit)
*/
public void setTransactionSuccessful() {
db.setTransactionSuccessful();
}
/**
* Register a soup
*
* Create table for soupName with a column for the soup itself and columns for paths specified in indexSpecs
* Create indexes on the new table to make lookup faster
* Create rows in soup index map table for indexSpecs
* @param soupName
* @param indexSpecs
*/
public void registerSoup(String soupName, IndexSpec[] indexSpecs) {
if (soupName == null) throw new SmartStoreException("Bogus soup name:" + soupName);
if (indexSpecs.length == 0) throw new SmartStoreException("No indexSpecs specified for soup: " + soupName);
        if (hasSoup(soupName)) return; // soup already exists - do nothing
// First get a table name
String soupTableName = null;
ContentValues soupMapValues = new ContentValues();
soupMapValues.put(SOUP_NAME_COL, soupName);
try {
db.beginTransaction();
long soupId = DBHelper.INSTANCE.insert(db, SOUP_NAMES_TABLE, soupMapValues);
soupTableName = getSoupTableName(soupId);
db.setTransactionSuccessful();
}
finally {
db.endTransaction();
}
// Prepare SQL for creating soup table and its indices
StringBuilder createTableStmt = new StringBuilder(); // to create new soup table
List<String> createIndexStmts = new ArrayList<String>(); // to create indices on new soup table
List<ContentValues> soupIndexMapInserts = new ArrayList<ContentValues>(); // to be inserted in soup index map table
createTableStmt.append("CREATE TABLE ").append(soupTableName).append(" (")
.append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT")
.append(", ").append(SOUP_COL).append(" TEXT")
.append(", ").append(CREATED_COL).append(" INTEGER")
.append(", ").append(LAST_MODIFIED_COL).append(" INTEGER");
int i = 0;
IndexSpec[] indexSpecsToCache = new IndexSpec[indexSpecs.length];
for (IndexSpec indexSpec : indexSpecs) {
// for create table
String columnName = soupTableName + "_" + i;
String columnType = indexSpec.type.getColumnType();
createTableStmt.append(", ").append(columnName).append(" ").append(columnType);
// for insert
ContentValues values = new ContentValues();
values.put(SOUP_NAME_COL, soupName);
values.put(PATH_COL, indexSpec.path);
values.put(COLUMN_NAME_COL, columnName);
values.put(COLUMN_TYPE_COL, indexSpec.type.toString());
soupIndexMapInserts.add(values);
// for create index
String indexName = soupTableName + "_" + i + "_idx";
            createIndexStmts.add(String.format("CREATE INDEX %s on %s ( %s )", indexName, soupTableName, columnName));
// for the cache
indexSpecsToCache[i] = new IndexSpec(indexSpec.path, indexSpec.type, columnName);
i++;
}
createTableStmt.append(")");
// Run SQL for creating soup table and its indices
db.execSQL(createTableStmt.toString());
for (String createIndexStmt : createIndexStmts) {
db.execSQL(createIndexStmt.toString());
}
try {
db.beginTransaction();
for (ContentValues values : soupIndexMapInserts) {
DBHelper.INSTANCE.insert(db, SOUP_INDEX_MAP_TABLE, values);
}
db.setTransactionSuccessful();
// Add to soupNameToTableNamesMap
DBHelper.INSTANCE.cacheTableName(soupName, soupTableName);
// Add to soupNameToIndexSpecsMap
DBHelper.INSTANCE.cacheIndexSpecs(soupName, indexSpecsToCache);
}
finally {
db.endTransaction();
}
}
/**
* Check if soup exists
*
* @param soupName
* @return true if soup exists, false otherwise
*/
public boolean hasSoup(String soupName) {
return DBHelper.INSTANCE.getSoupTableName(db, soupName) != null;
}
/**
* Destroy a soup
*
* Drop table for soupName
* Cleanup entries in soup index map table
* @param soupName
*/
public void dropSoup(String soupName) {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName != null) {
db.execSQL("DROP TABLE IF EXISTS " + soupTableName);
try {
db.beginTransaction();
DBHelper.INSTANCE.delete(db, SOUP_NAMES_TABLE, SOUP_NAME_PREDICATE, soupName);
DBHelper.INSTANCE.delete(db, SOUP_INDEX_MAP_TABLE, SOUP_NAME_PREDICATE, soupName);
db.setTransactionSuccessful();
// Remove from cache
DBHelper.INSTANCE.removeFromCache(soupName);
}
finally {
db.endTransaction();
}
}
}
/**
* Destroy all the soups in the smartstore
*/
public void dropAllSoups() {
List<String> soupNames = getAllSoupNames();
for(String soupName : soupNames) {
dropSoup(soupName);
}
}
/**
* @return all soup names in the smartstore
*/
public List<String> getAllSoupNames() {
List<String> soupNames = new ArrayList<String>();
Cursor cursor = null;
try {
cursor = DBHelper.INSTANCE.query(db, SOUP_NAMES_TABLE, new String[] {SOUP_NAME_COL}, null, null, null);
if (cursor.moveToFirst()) {
do {
soupNames.add(cursor.getString(0));
}
while (cursor.moveToNext());
}
}
finally {
safeClose(cursor);
}
return soupNames;
}
/**
     * Run a query given by its query spec, returning only the results from the selected page
* @param querySpec
* @param pageIndex
* @throws JSONException
*/
public JSONArray query(QuerySpec querySpec, int pageIndex) throws JSONException {
QueryType qt = querySpec.queryType;
String sql = convertSmartSql(querySpec.smartSql);
// Page
int offsetRows = querySpec.pageSize * pageIndex;
int numberRows = querySpec.pageSize;
String limit = offsetRows + "," + numberRows;
Cursor cursor = null;
try {
cursor = DBHelper.INSTANCE.limitRawQuery(db, sql, limit, querySpec.getArgs());
JSONArray results = new JSONArray();
if (cursor.moveToFirst()) {
do {
// Smart queries
if (qt == QueryType.smart) {
results.put(getDataFromRow(cursor));
}
// Exact/like/range queries
else {
results.put(new JSONObject(cursor.getString(0)));
}
}
while (cursor.moveToNext());
}
return results;
}
finally {
safeClose(cursor);
}
}
/**
* Return JSONArray for one row of data from cursor
* @param cursor
* @return
* @throws JSONException
*/
private JSONArray getDataFromRow(Cursor cursor) throws JSONException {
JSONArray row = new JSONArray();
int columnCount = cursor.getColumnCount();
for (int i=0; i<columnCount; i++) {
String raw = cursor.getString(i);
// Is this column holding a serialized json object?
if (cursor.getColumnName(i).endsWith(SOUP_COL)) {
row.put(new JSONObject(raw));
// Note: we could end up returning a string if you aliased the column
}
else {
                // Is it holding an integer?
try {
Long n = Long.parseLong(raw);
row.put(n);
// Note: we could end up returning an integer for a string column if you have a string value that contains just an integer
}
// It must be holding a string then
catch (NumberFormatException e) {
row.put(raw);
}
// cursor.getType is API 11 and above
}
}
return row;
}
/**
* @param querySpec
* @return count of results for a "smart" query
*/
public int countQuery(QuerySpec querySpec) {
String sql = convertSmartSql(querySpec.smartSql);
return DBHelper.INSTANCE.countRawQuery(db, sql, querySpec.getArgs());
}
/**
* @param smartSql
* @return
*/
public String convertSmartSql(String smartSql) {
return SmartSqlHelper.INSTANCE.convertSmartSql(db, smartSql);
}
/**
* Create (and commits)
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @return soupElt created or null if creation failed
* @throws JSONException
*/
public JSONObject create(String soupName, JSONObject soupElt) throws JSONException {
return create(soupName, soupElt, true);
}
/**
* Create
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @return
* @throws JSONException
*/
public JSONObject create(String soupName, JSONObject soupElt, boolean handleTx) throws JSONException {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
IndexSpec[] indexSpecs = DBHelper.INSTANCE.getIndexSpecs(db, soupName);
try {
if (handleTx) {
db.beginTransaction();
}
long now = System.currentTimeMillis();
long soupEntryId = DBHelper.INSTANCE.getNextId(db, soupTableName);
// Adding fields to soup element
soupElt.put(SOUP_ENTRY_ID, soupEntryId);
soupElt.put(SOUP_LAST_MODIFIED_DATE, now);
ContentValues contentValues = new ContentValues();
contentValues.put(ID_COL, soupEntryId);
contentValues.put(SOUP_COL, "");
contentValues.put(CREATED_COL, now);
contentValues.put(LAST_MODIFIED_COL, now);
contentValues.put(SOUP_COL, soupElt.toString());
for (IndexSpec indexSpec : indexSpecs) {
projectIndexedPaths(soupElt, contentValues, indexSpec);
}
// Inserting into database
boolean success = DBHelper.INSTANCE.insert(db, soupTableName, contentValues) == soupEntryId;
// Commit if successful
if (success) {
if (handleTx) {
db.setTransactionSuccessful();
}
return soupElt;
}
else {
return null;
}
}
finally {
if (handleTx) {
db.endTransaction();
}
}
}
/**
* @param soupElt
* @param contentValues
* @param indexSpec
*/
private void projectIndexedPaths(JSONObject soupElt, ContentValues contentValues, IndexSpec indexSpec) {
Object value = project(soupElt, indexSpec.path);
switch (indexSpec.type) {
case integer:
contentValues.put(indexSpec.columnName, (Integer) value); break;
case string:
contentValues.put(indexSpec.columnName, value != null ? value.toString() : null); break;
case floating:
contentValues.put(indexSpec.columnName, (Double) value); break;
}
}
/**
* Retrieve
* @param soupName
* @param soupEntryIds
* @return JSONArray of JSONObject's with the given soupEntryIds
* @throws JSONException
*/
public JSONArray retrieve(String soupName, Long... soupEntryIds) throws JSONException {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
Cursor cursor = null;
try {
JSONArray result = new JSONArray();
cursor = DBHelper.INSTANCE.query(db, soupTableName, new String[] {SOUP_COL}, null, null, getSoupEntryIdsPredicate(soupEntryIds), (String[]) null);
if (!cursor.moveToFirst()) {
return result;
}
do {
String raw = cursor.getString(cursor.getColumnIndex(SOUP_COL));
result.put(new JSONObject(raw));
}
while (cursor.moveToNext());
return result;
}
finally {
safeClose(cursor);
}
}
/**
* Update (and commits)
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @param soupEntryId
* @return soupElt updated or null if update failed
* @throws JSONException
*/
public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId) throws JSONException {
return update(soupName, soupElt, soupEntryId, true);
}
/**
* Update
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @param soupEntryId
* @return
* @throws JSONException
*/
public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId, boolean handleTx) throws JSONException {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
IndexSpec[] indexSpecs = DBHelper.INSTANCE.getIndexSpecs(db, soupName);
long now = System.currentTimeMillis();
// In the case of an upsert with external id, _soupEntryId won't be in soupElt
soupElt.put(SOUP_ENTRY_ID, soupEntryId);
// Updating last modified field in soup element
soupElt.put(SOUP_LAST_MODIFIED_DATE, now);
// Preparing data for row
ContentValues contentValues = new ContentValues();
contentValues.put(SOUP_COL, soupElt.toString());
contentValues.put(LAST_MODIFIED_COL, now);
for (IndexSpec indexSpec : indexSpecs) {
projectIndexedPaths(soupElt, contentValues, indexSpec);
}
try {
if (handleTx) {
db.beginTransaction();
}
boolean success = DBHelper.INSTANCE.update(db, soupTableName, contentValues, SOUP_ENTRY_ID_PREDICATE, soupEntryId + "") == 1;
if (success) {
if (handleTx) {
db.setTransactionSuccessful();
}
return soupElt;
}
else {
return null;
}
}
finally {
if (handleTx) {
db.endTransaction();
}
}
}
/**
* Upsert (and commits)
* @param soupName
* @param soupElt
* @param externalIdPath
* @return soupElt upserted or null if upsert failed
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath) throws JSONException {
return upsert(soupName, soupElt, externalIdPath, true);
}
/**
* Upsert (and commits) expecting _soupEntryId in soupElt for updates
* @param soupName
* @param soupElt
* @return
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt) throws JSONException {
return upsert(soupName, soupElt, SOUP_ENTRY_ID);
}
/**
* Upsert
* @param soupName
* @param soupElt
* @param externalIdPath
* @param handleTx
* @return
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath, boolean handleTx) throws JSONException {
long entryId = -1;
if (externalIdPath.equals(SOUP_ENTRY_ID)) {
if (soupElt.has(SOUP_ENTRY_ID)) {
entryId = soupElt.getLong(SOUP_ENTRY_ID);
}
}
else {
Object externalIdObj = project(soupElt, externalIdPath);
if (externalIdObj != null) {
entryId = lookupSoupEntryId(soupName, externalIdPath, externalIdObj + "");
}
}
// If we have an entryId, let's do an update, otherwise let's do a create
if (entryId != -1) {
return update(soupName, soupElt, entryId, handleTx);
}
else {
return create(soupName, soupElt, handleTx);
}
}
/**
* Look for a soup element where fieldPath's value is fieldValue
* Return its soupEntryId
* Return -1 if not found
     * Throw an exception if fieldPath is not indexed
     * Throw an exception if more than one soup element is found
*
* @param soupName
* @param fieldPath
* @param fieldValue
*/
public long lookupSoupEntryId(String soupName, String fieldPath, String fieldValue) {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
String columnName = DBHelper.INSTANCE.getColumnNameForPath(db, soupName, fieldPath);
Cursor cursor = null;
try {
cursor = db.query(soupTableName, new String[] {ID_COL}, columnName + " = ?", new String[] { fieldValue }, null, null, null);
if (cursor.getCount() > 1) {
throw new SmartStoreException(String.format("There are more than one soup elements where %s is %s", fieldPath, fieldValue));
}
if (cursor.moveToFirst()) {
return cursor.getLong(0);
}
else {
return -1; // not found
}
}
finally {
safeClose(cursor);
}
}
/**
* Delete (and commits)
* @param soupName
* @param soupEntryIds
*/
public void delete(String soupName, Long... soupEntryIds) {
delete(soupName, soupEntryIds, true);
}
/**
* Delete
* @param soupName
* @param soupEntryId
* @param handleTx
*/
public void delete(String soupName, Long[] soupEntryIds, boolean handleTx) {
String soupTableName = DBHelper.INSTANCE.getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
if (handleTx) {
db.beginTransaction();
}
try {
db.delete(soupTableName, getSoupEntryIdsPredicate(soupEntryIds), (String []) null);
if (handleTx) {
db.setTransactionSuccessful();
}
}
finally {
if (handleTx) {
db.endTransaction();
}
}
}
/**
* @return predicate to match soup entries by id
*/
protected String getSoupEntryIdsPredicate(Long[] soupEntryIds) {
return ID_COL + " IN (" + TextUtils.join(",", soupEntryIds)+ ")";
}
/**
     * @param soupId id of the soup in the soup names table
     * @return name of the table backing the soup with the given id
*/
public static String getSoupTableName(long soupId) {
return "TABLE_" + soupId;
}
/**
* @param cursor
*/
protected void safeClose(Cursor cursor) {
if (cursor != null) {
cursor.close();
}
}
/**
* @param soup
* @param path
* @return object at path in soup
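     *
     * For example (illustrative values), project(new JSONObject("{\"address\":{\"city\":\"SF\"}}"), "address.city")
     * returns the string "SF".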
*/
public static Object project(JSONObject soup, String path) {
if (soup == null) {
return null;
}
if (path == null || path.equals("")) {
return soup;
}
String[] pathElements = path.split("[.]");
Object o = soup;
for (String pathElement : pathElements) {
o = ((JSONObject) o).opt(pathElement);
}
return o;
}
/**
* Enum for column type
*/
public enum Type {
string("TEXT"), integer("INTEGER"), floating("REAL");
private String columnType;
private Type(String columnType) {
this.columnType = columnType;
}
public String getColumnType() {
return columnType;
}
}
/**
* Exception thrown by smart store
*
*/
public static class SmartStoreException extends RuntimeException {
public SmartStoreException(String message) {
super(message);
}
private static final long serialVersionUID = -6369452803270075464L;
}
}
| 1 | 13,835 | Unrelated bugfix for NPE. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -387,7 +387,7 @@ class AdminController extends Controller
throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName));
}
- if (!$propertyMetadata['canBeSet']) {
+ if (!$propertyMetadata['isWritable']) {
throw new \Exception(sprintf('It\'s not possible to toggle the value of the "%s" boolean property of the "%s" entity.', $propertyName, $this->entity['name']));
}
| 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Platforms\PostgreSqlPlatform;
use Doctrine\ORM\EntityManager;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Pagerfanta\Pagerfanta;
use Pagerfanta\Adapter\DoctrineORMAdapter;
use JavierEguiluz\Bundle\EasyAdminBundle\Event\EasyAdminEvents;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\UndefinedEntityException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
/**
* The controller used to render all the default EasyAdmin actions.
*
* @author Javier Eguiluz <[email protected]>
*/
class AdminController extends Controller
{
protected $config;
protected $entity = array();
/** @var Request */
protected $request;
/** @var EntityManager */
protected $em;
/**
* @Route("/", name="easyadmin")
* @Route("/", name="admin")
*
* The 'admin' route is deprecated since version 1.8.0 and it will be removed in 2.0.
*
* @param Request $request
*
* @return RedirectResponse|Response
*/
public function indexAction(Request $request)
{
$this->initialize($request);
if (null === $request->query->get('entity')) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->config['default_entity_name'])));
}
$action = $request->query->get('action', 'list');
if (!$this->isActionAllowed($action)) {
throw new ForbiddenActionException(array('action' => $action, 'entity' => $this->entity['name']));
}
$customMethodName = $action.$this->entity['name'].'Action';
$defaultMethodName = $action.'Action';
return method_exists($this, $customMethodName) ? $this->{$customMethodName}() : $this->{$defaultMethodName}();
}
/**
* Utility method which initializes the configuration of the entity on which
* the user is performing the action.
*
* @param Request $request
*/
protected function initialize(Request $request)
{
$this->dispatch(EasyAdminEvents::PRE_INITIALIZE);
$this->config = $this->container->getParameter('easyadmin.config');
if (0 === count($this->config['entities'])) {
throw new NoEntitiesConfiguredException();
}
// this condition happens when accessing the backend homepage, which
// then redirects to the 'list' action of the first configured entity
if (null === $entityName = $request->query->get('entity')) {
return;
}
if (!array_key_exists($entityName, $this->config['entities'])) {
throw new UndefinedEntityException(array('entity_name' => $entityName));
}
$this->entity = $this->get('easyadmin.configurator')->getEntityConfiguration($entityName);
if (!$request->query->has('sortField')) {
$request->query->set('sortField', $this->entity['primary_key_field_name']);
}
if (!$request->query->has('sortDirection') || !in_array(strtoupper($request->query->get('sortDirection')), array('ASC', 'DESC'))) {
$request->query->set('sortDirection', 'DESC');
}
$this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']);
$this->request = $request;
$this->dispatch(EasyAdminEvents::POST_INITIALIZE);
}
protected function dispatch($eventName, array $arguments = array())
{
$arguments = array_replace(array(
'config' => $this->config,
'em' => $this->em,
'entity' => $this->entity,
'request' => $this->request,
), $arguments);
$subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity'];
$event = new GenericEvent($subject, $arguments);
$this->get('event_dispatcher')->dispatch($eventName, $event);
}
/**
* The method that is executed when the user performs a 'list' action on an entity.
*
* @return Response
*/
protected function listAction()
{
$this->dispatch(EasyAdminEvents::PRE_LIST);
$fields = $this->entity['list']['fields'];
$paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'));
$this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
));
}
/**
* The method that is executed when the user performs a 'edit' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function editAction()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
if ($this->request->isXmlHttpRequest()) {
return $this->ajaxEdit();
}
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['edit']['fields'];
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EditForm')) {
$editForm = $this->{$customMethodName}($entity, $fields);
} else {
$editForm = $this->createEditForm($entity, $fields);
}
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$editForm->handleRequest($this->request);
if ($editForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'preUpdate'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->preUpdateEntity($entity);
}
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity));
$refererUrl = $this->request->query->get('referer', '');
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_EDIT);
return $this->render($this->entity['templates']['edit'], array(
'form' => $editForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'show' action on an entity.
*
* @return Response
*/
protected function showAction()
{
$this->dispatch(EasyAdminEvents::PRE_SHOW);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['show']['fields'];
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$this->dispatch(EasyAdminEvents::POST_SHOW, array(
'deleteForm' => $deleteForm,
'fields' => $fields,
'entity' => $entity,
));
return $this->render($this->entity['templates']['show'], array(
'entity' => $entity,
'fields' => $fields,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'new' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function newAction()
{
$this->dispatch(EasyAdminEvents::PRE_NEW);
if (method_exists($this, $customMethodName = 'createNew'.$this->entity['name'].'Entity')) {
$entity = $this->{$customMethodName}();
} else {
$entity = $this->createNewEntity();
}
$easyadmin = $this->request->attributes->get('easyadmin');
$easyadmin['item'] = $entity;
$this->request->attributes->set('easyadmin', $easyadmin);
$fields = $this->entity['new']['fields'];
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'NewForm')) {
$newForm = $this->{$customMethodName}($entity, $fields);
} else {
$newForm = $this->createNewForm($entity, $fields);
}
$newForm->handleRequest($this->request);
if ($newForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'prePersist'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->prePersistEntity($entity);
}
$this->em->persist($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity));
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_NEW, array(
'entity_fields' => $fields,
'form' => $newForm,
'entity' => $entity,
));
return $this->render($this->entity['templates']['new'], array(
'form' => $newForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
));
}
/**
* The method that is executed when the user performs a 'delete' action to
* remove any entity.
*
* @return RedirectResponse
*/
protected function deleteAction()
{
$this->dispatch(EasyAdminEvents::PRE_DELETE);
if ('DELETE' !== $this->request->getMethod()) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$id = $this->request->query->get('id');
$form = $this->createDeleteForm($this->entity['name'], $id);
$form->handleRequest($this->request);
if ($form->isValid()) {
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'preRemove'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->preRemoveEntity($entity);
}
$this->em->remove($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity));
}
$refererUrl = $this->request->query->get('referer', '');
$this->dispatch(EasyAdminEvents::POST_DELETE);
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
/**
* The method that is executed when the user performs a query on an entity.
*
* @return Response
*/
protected function searchAction()
{
$this->dispatch(EasyAdminEvents::PRE_SEARCH);
$searchableFields = $this->entity['search']['fields'];
$paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results']);
$fields = $this->entity['list']['fields'];
$this->dispatch(EasyAdminEvents::POST_SEARCH, array(
'fields' => $fields,
'paginator' => $paginator,
));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
));
}
/**
* Modifies the entity properties via an Ajax call. Currently it's used for
* changing the value of boolean properties when the user clicks on the
     * flip switch displayed for boolean values in the 'list' action.
*/
protected function ajaxEdit()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
if (!$entity = $this->em->getRepository($this->entity['class'])->find($this->request->query->get('id'))) {
throw new \Exception('The entity does not exist.');
}
$propertyName = $this->request->query->get('property');
$propertyMetadata = $this->entity['list']['fields'][$propertyName];
if (!isset($this->entity['list']['fields'][$propertyName]) || 'toggle' != $propertyMetadata['dataType']) {
throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName));
}
if (!$propertyMetadata['canBeSet']) {
throw new \Exception(sprintf('It\'s not possible to toggle the value of the "%s" boolean property of the "%s" entity.', $propertyName, $this->entity['name']));
}
$newValue = ('true' === strtolower($this->request->query->get('newValue'))) ? true : false;
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
if (null !== $setter = $propertyMetadata['setter']) {
$entity->{$setter}($newValue);
} else {
$entity->{$propertyName} = $newValue;
}
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
$this->dispatch(EasyAdminEvents::POST_EDIT);
return new Response((string) $newValue);
}
/**
* Creates a new object of the current managed entity.
* This method is mostly here for override convenience, because it allows
* the user to use his own method to customize the entity instantiation.
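     *
     * For instance, a subclass managing a hypothetical "Product" entity could
     * declare (entity and field names are illustrative, not part of this bundle):
     *
     *     protected function createNewProductEntity()
     *     {
     *         $product = new Product();
     *         $product->setCreatedAt(new \DateTime());
     *
     *         return $product;
     *     }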
*
* @return object
*/
protected function createNewEntity()
{
$entityFullyQualifiedClassName = $this->entity['class'];
return new $entityFullyQualifiedClassName();
}
/**
* Allows applications to modify the entity associated with the item being
* created before persisting it.
*
* @param object $entity
*/
protected function prePersistEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* edited before persisting it.
*
* @param object $entity
*/
protected function preUpdateEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* deleted before removing it.
*
* @param object $entity
*/
protected function preRemoveEntity($entity)
{
}
/**
* Performs a database query to get all the records related to the given
* entity. It supports pagination and field sorting.
*
* @param string $entityClass
* @param int $page
* @param int $maxPerPage
* @param string|null $sortField
* @param string|null $sortDirection
*
* @return Pagerfanta The paginated query results
*/
protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null)
{
$query = $this->em->createQueryBuilder()
->select('entity')
->from($entityClass, 'entity')
;
if (null !== $sortField) {
if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) {
$sortDirection = 'DESC';
}
$query->orderBy('entity.'.$sortField, $sortDirection);
}
$paginator = new Pagerfanta(new DoctrineORMAdapter($query, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
     * Performs a database query based on the search query provided by the user.
     * It supports pagination.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
* @param int $page
* @param int $maxPerPage
*
* @return Pagerfanta The paginated query results
*/
protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15)
{
$databaseIsPostgreSql = $this->isPostgreSqlUsedByEntity($entityClass);
$queryBuilder = $this->em->createQueryBuilder()->select('entity')->from($entityClass, 'entity');
$queryConditions = $queryBuilder->expr()->orX();
$queryParameters = array();
foreach ($searchableFields as $name => $metadata) {
$isNumericField = in_array($metadata['dataType'], array('integer', 'number', 'smallint', 'bigint', 'decimal', 'float'));
$isTextField = in_array($metadata['dataType'], array('string', 'text', 'guid'));
if (is_numeric($searchQuery) && $isNumericField) {
$queryConditions->add(sprintf('entity.%s = :exact_query', $name));
$queryParameters['exact_query'] = 0 + $searchQuery; // adding '0' turns the string into a numeric value
} elseif ($isTextField) {
$queryConditions->add(sprintf('entity.%s LIKE :fuzzy_query', $name));
$queryParameters['fuzzy_query'] = '%'.$searchQuery.'%';
} else {
// PostgreSQL doesn't allow to compare string values with non-string columns (e.g. 'id')
if ($databaseIsPostgreSql) {
continue;
}
$queryConditions->add(sprintf('entity.%s IN (:words)', $name));
$queryParameters['words'] = explode(' ', $searchQuery);
}
}
$queryBuilder->add('where', $queryConditions)->setParameters($queryParameters);
$paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
* Creates the form used to edit an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createEditForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'edit');
}
/**
* Creates the form used to create an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createNewForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'new');
}
/**
* Creates the form builder of the form used to create or edit the given entity.
*
* @param object $entity
* @param string $view The name of the view where this form is used ('new' or 'edit')
*
* @return FormBuilder
*/
protected function createEntityFormBuilder($entity, $view)
{
$formOptions = $this->entity[$view]['form_options'];
$formOptions['entity'] = $this->entity['name'];
$formOptions['view'] = $view;
return $this->get('form.factory')->createNamedBuilder('form', 'easyadmin', $entity, $formOptions);
}
/**
* Creates the form object used to create or edit the given entity.
*
* @param object $entity
* @param array $entityProperties
* @param string $view
*
* @return Form
*
* @throws \Exception
*/
protected function createEntityForm($entity, array $entityProperties, $view)
{
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) {
$form = $this->{$customMethodName}($entity, $entityProperties, $view);
if (!$form instanceof FormInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormInterface, "%s" given.',
$customMethodName, is_object($form) ? get_class($form) : gettype($form)
));
}
return $form;
}
if (method_exists($this, $customBuilderMethodName = 'create'.$this->entity['name'].'EntityFormBuilder')) {
$formBuilder = $this->{$customBuilderMethodName}($entity, $entityProperties, $view);
} else {
$formBuilder = $this->createEntityFormBuilder($entity, $view);
}
if (!$formBuilder instanceof FormBuilderInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormBuilderInterface, "%s" given.',
'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder)
));
}
return $formBuilder->getForm();
}
/**
     * Creates the form used to delete an entity. It must be a form because
     * the deletion of the entity is always performed with the 'DELETE' HTTP method,
     * which requires a form to work in current browsers.
*
* @param string $entityName
* @param int $entityId
*
* @return Form
*/
protected function createDeleteForm($entityName, $entityId)
{
return $this->createFormBuilder()
->setAction($this->generateUrl('easyadmin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId)))
->setMethod('DELETE')
->add('submit', 'submit', array('label' => 'Delete'))
->getForm()
;
}
/**
* Utility shortcut to render a template as a 404 error page.
*
* @param string $view
* @param array $parameters
*
* @deprecated Use an appropriate exception instead of this method.
*
* @return Response
*/
protected function render404error($view, array $parameters = array())
{
return $this->render($view, $parameters, new Response('', 404));
}
/**
* Utility method that checks if the given action is allowed for
* the current entity.
*
* @param string $actionName
*
* @return bool
*/
protected function isActionAllowed($actionName)
{
return false === in_array($actionName, $this->entity['disabled_actions'], true);
}
/**
* Utility shortcut to render an error when the requested action is not allowed
* for the given entity.
*
* @param string $action
*
* @deprecated Use the ForbiddenException instead of this method.
*
* @return Response
*/
protected function renderForbiddenActionError($action)
{
return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), new Response('', 403));
}
/**
     * Renders the main CSS applied to the backend design. This controller
     * makes it possible to generate dynamic CSS files that use variables without
     * the need to set up a CSS preprocessing toolchain.
*
* @Route("/_css/admin.css", name="_easyadmin_render_css")
*/
public function renderCssAction()
{
$config = $this->container->getParameter('easyadmin.config');
$cssContent = $this->renderView('@EasyAdmin/css/admin.css.twig', array(
'brand_color' => $config['design']['brand_color'],
'color_scheme' => $config['design']['color_scheme'],
));
$response = new Response($cssContent, 200, array('Content-Type' => 'text/css'));
$response->setPublic();
$response->setSharedMaxAge(600);
return $response;
}
/**
     * Returns true if the data of the given entity is stored in a PostgreSQL
     * database.
*
* @param string $entityClass
*
* @return bool
*/
private function isPostgreSqlUsedByEntity($entityClass)
{
$em = $this->get('doctrine')->getManagerForClass($entityClass);
return $em->getConnection()->getDatabasePlatform() instanceof PostgreSqlPlatform;
}
}
| 1 | 9,283 | What if some extended the `AdminController` and had a check for the old option name? Could that be a valid use case? | EasyCorp-EasyAdminBundle | php |
@@ -524,6 +524,17 @@
dataType: "json"
});
},
+ fetchSegmentMap: function() {
+ return CV.$.ajax({
+ type: "GET",
+ url: countlyCommon.API_PARTS.data.r + '/data-manager/event-segment',
+ data: {
+ "app_id": countlyCommon.ACTIVE_APP_ID,
+ "preventRequestAbort": true,
+ },
+ dataType: "json"
+ });
+ },
fetchRefreshSelectedEventsData: function(context) {
return CV.$.ajax({
type: "GET", | 1 | /*global countlyVue, CV, _, countlyCommon, jQuery */
(function(countlyAllEvents) {
countlyAllEvents.helpers = {
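        // Builds the count/sum/duration line chart series from the processed event data and commits them to the store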
getLineChartData: function(context, eventData) {
var chartData = eventData.chartData;
var graphData = [[], [], []];
var labels = context.state.labels;
for (var i = 0; i < chartData.length; i++) {
graphData[0].push(chartData[i].c ? chartData[i].c : 0);
graphData[1].push(chartData[i].s ? chartData[i].s : 0);
graphData[2].push(chartData[i].dur ? chartData[i].dur : 0);
}
var series = [];
var countObj = {
name: labels.count,
data: graphData[0],
color: "#017AFF"
};
series.push(countObj);
var sumObj = {
name: labels.sum,
data: graphData[1],
color: "#F96300"
};
series.push(sumObj);
var durObj = {
name: labels.dur,
data: graphData[2],
color: "#FF9382"
};
series.push(durObj);
var obj = {
series: series
};
context.commit('setLineChartData', obj);
},
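        // Builds the datatable rows from the processed event data, formatting the numbers and appending average sum/duration columns when those metrics are present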
getTableRows: function(context) {
var eventData = context.state.allEventsProcessed;
var tableRows = eventData.chartData.slice();
var labels = context.state.labels;
if (eventData.tableColumns.indexOf(labels.sum) !== -1 && eventData.tableColumns.indexOf(labels.dur) !== -1) {
tableRows.forEach(function(row) {
row.avgSum = (parseInt(row.c) === 0 || parseInt(row.s) === 0) ? 0 : countlyCommon.formatNumber(row.s / row.c);
row.avgDur = (parseInt(row.c) === 0 || parseInt(row.dur) === 0) ? 0 : countlyCommon.formatNumber(row.dur / row.c);
row.c = countlyCommon.formatNumber(row.c);
row.s = countlyCommon.formatNumber(row.s);
row.dur = countlyCommon.formatNumber(row.dur);
});
eventData.tableColumns.push("AvgSum");
eventData.tableColumns.push("AvgDur");
}
else if (eventData.tableColumns.indexOf(labels.sum) !== -1) {
tableRows.forEach(function(row) {
row.avgSum = (parseInt(row.c) === 0 || parseInt(row.s) === 0) ? 0 : countlyCommon.formatNumber(row.s / row.c);
row.c = countlyCommon.formatNumber(row.c);
row.s = countlyCommon.formatNumber(row.s);
row.dur = countlyCommon.formatNumber(row.dur);
});
eventData.tableColumns.push("AvgSum");
}
else if (eventData.tableColumns.indexOf(labels.dur) !== -1) {
tableRows.forEach(function(row) {
row.avgDur = (parseInt(row.c) === 0 || parseInt(row.dur) === 0) ? 0 : countlyCommon.formatNumber(row.dur / row.c);
row.c = countlyCommon.formatNumber(row.c);
row.s = countlyCommon.formatNumber(row.s);
row.dur = countlyCommon.formatNumber(row.dur);
});
eventData.tableColumns.push("AvgDur");
}
else {
tableRows.forEach(function(row) {
row.c = countlyCommon.formatNumber(row.c);
});
}
return tableRows;
},
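        // Builds the segmented bar chart options (one series per metric, x-axis of segment values) and commits them to the store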
getBarChartData: function(context, eventData) {
var arrCount = [];
var arrSum = [];
var arrDuration = [];
var xAxisData = [];
var obj = {};
var xAxis = {};
var legend = {};
var series = [];
var obCount = {};
var obSum = {};
var obDuration = {};
var labels = context.state.labels;
for (var i = 0; i < eventData.chartData.length; i++) {
arrCount.push(eventData.chartData[i].c);
arrSum.push(eventData.chartData[i].s);
arrDuration.push(eventData.chartData[i].dur);
xAxisData.push(eventData.chartData[i].curr_segment);
}
obCount.name = labels.count;
obCount.data = arrCount;
obCount.color = "#017AFF";
xAxis.data = xAxisData;
series.push(obCount);
obSum.name = labels.sum;
obSum.data = arrSum;
obSum.color = "#F96300";
series.push(obSum);
obDuration.name = labels.dur;
obDuration.data = arrDuration;
obDuration.color = "#FF9382";
series.push(obDuration);
legend.show = false;
obj.legend = legend;
obj.series = series;
obj.xAxis = xAxis;
context.commit('setBarData', obj);
},
clearEventsObject: function(obj) {
if (obj) {
if (!obj.c) {
obj.c = 0;
}
if (!obj.s) {
obj.s = 0;
}
if (!obj.dur) {
obj.dur = 0;
}
}
else {
obj = { "c": 0, "s": 0, "dur": 0 };
}
return obj;
},
getEventLongName: function(eventKey, eventMap) {
            var mapKey = eventKey.replace(/\\/g, "\\\\").replace(/\$/g, "\\u0024").replace(/\./g, "\\u002e");
if (eventMap && eventMap[mapKey] && eventMap[mapKey].name) {
return eventMap[mapKey].name;
}
else {
return eventKey;
}
},
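        // Extracts chart and table data for the selected event (per segment value when a segmentation is active, per date otherwise) and computes the count/sum/duration totals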
getEventsData: function(context, res) {
var eventData = { chartData: {}, chartDP: { dp: [], ticks: [] } };
var allEvents = context.state.allEventsData;
var groupData = context.state.groupData.displayMap;
var eventMap = allEvents.map;
var mapKey = context.state.selectedEventName.replace(/\\/g, "\\\\").replace(/\$/g, "\\u0024").replace(/\./g, '\\u002e');
var countString = (mapKey.startsWith('[CLY]_group') && groupData.c) ? groupData.c : (eventMap && eventMap[mapKey] && eventMap[mapKey].count) ? eventMap[mapKey].count : jQuery.i18n.map["events.table.count"];
var sumString = (mapKey.startsWith('[CLY]_group') && groupData.s) ? groupData.s : (eventMap && eventMap[mapKey] && eventMap[mapKey].sum) ? eventMap[mapKey].sum : jQuery.i18n.map["events.table.sum"];
var durString = (mapKey.startsWith('[CLY]_group') && groupData.d) ? groupData.d : (eventMap && eventMap[mapKey] && eventMap[mapKey].dur) ? eventMap[mapKey].dur : jQuery.i18n.map["events.table.dur"];
if (context.state.currentActiveSegmentation !== "segment" && context.state.hasSegments) {
var segments = res.meta[context.state.currentActiveSegmentation].slice();
var tmpEventData = countlyCommon.extractTwoLevelData(res, segments, countlyAllEvents.helpers.clearEventsObject, [
{
name: "curr_segment",
func: function(rangeArr) {
return rangeArr.replace(/:/g, ".").replace(/\[CLY\]/g, "").replace(/.\/\//g, "://");
}
},
{ "name": "c" },
{ "name": "s" },
{ "name": "dur" }
]);
eventData.chartData = tmpEventData.chartData;
var segmentsSum = _.without(_.pluck(eventData.chartData, 's'), false, null, "", undefined, NaN);
var segmentsDur = _.without(_.pluck(eventData.chartData, 'dur'), false, null, "", undefined, NaN);
if (_.reduce(segmentsSum, function(memo, num) {
return memo + num;
}, 0) === 0) {
segmentsSum = [];
}
if (_.reduce(segmentsDur, function(memo, num) {
return memo + num;
}, 0) === 0) {
segmentsDur = [];
}
eventData.eventName = countlyAllEvents.helpers.getEventLongName(context.state.selectedEventName, eventMap);
if (mapKey && eventMap && eventMap[mapKey]) {
eventData.eventDescription = eventMap[mapKey].description || "";
}
eventData.dataLevel = 2;
eventData.tableColumns = [jQuery.i18n.map["events.table.segmentation"], countString];
if (segmentsSum.length || segmentsDur.length) {
if (segmentsSum.length) {
eventData.tableColumns[eventData.tableColumns.length] = sumString;
}
if (segmentsDur.length) {
eventData.tableColumns[eventData.tableColumns.length] = durString;
}
}
else {
_.each(eventData.chartData, function(element, index, list) {
list[index] = _.pick(element, "curr_segment", "c");
});
}
}
else {
var chartData = [
{ data: [], label: countString },
{ data: [], label: sumString },
{ data: [], label: durString }
],
dataProps = [
{ name: "c" },
{ name: "s" },
{ name: "dur" }
];
eventData = countlyCommon.extractChartData(res, countlyAllEvents.helpers.clearEventsObject, chartData, dataProps);
eventData.eventName = countlyAllEvents.helpers.getEventLongName(context.state.selectedEventName, eventMap);
if (mapKey && eventMap && eventMap[mapKey]) {
eventData.eventDescription = eventMap[mapKey].description || "";
}
eventData.dataLevel = 1;
eventData.tableColumns = [jQuery.i18n.map["common.date"], countString];
var cleanSumCol = _.without(_.pluck(eventData.chartData, 's'), false, null, "", undefined, NaN);
var cleanDurCol = _.without(_.pluck(eventData.chartData, 'dur'), false, null, "", undefined, NaN);
var reducedSum = _.reduce(cleanSumCol, function(memo, num) {
return memo + num;
}, 0);
var reducedDur = _.reduce(cleanDurCol, function(memo, num) {
return memo + num;
}, 0);
if (reducedSum !== 0 || reducedDur !== 0) {
if (reducedSum !== 0) {
eventData.tableColumns[eventData.tableColumns.length] = sumString;
}
if (reducedDur !== 0) {
eventData.tableColumns[eventData.tableColumns.length] = durString;
}
}
else {
eventData.chartDP = _.compact(eventData.chartDP);
_.each(eventData.chartData, function(element, index, list) {
list[index] = _.pick(element, "date", "c");
});
}
}
var countArr = _.pluck(eventData.chartData, "c");
if (countArr.length) {
eventData.totalCount = _.reduce(countArr, function(memo, num) {
return memo + num;
}, 0);
}
var sumArr = _.pluck(eventData.chartData, "s");
if (sumArr.length) {
eventData.totalSum = _.reduce(sumArr, function(memo, num) {
return memo + num;
}, 0);
}
var durArr = _.pluck(eventData.chartData, "dur");
if (durArr.length) {
eventData.totalDur = _.reduce(durArr, function(memo, num) {
return memo + num;
}, 0);
}
context.commit('setAllEventsProcessed', eventData);
return eventData;
},
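        // Reads the available segments from the response, updates the hasSegments flag and builds the matching chart data (bar chart for a segmentation, line chart for the overall view)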
getSegments: function(context, res) {
var segments = [];
if (res.meta && res.meta.segments.length > 0) {
segments = res.meta.segments.slice();
segments.push("segment");
context.commit('setHasSegments', true);
}
else {
context.commit('setHasSegments', false);
}
var eventData = countlyAllEvents.helpers.getEventsData(context, res);
if (context.state.currentActiveSegmentation !== "segment") {
countlyAllEvents.helpers.getBarChartData(context, eventData);
}
else {
countlyAllEvents.helpers.getLineChartData(context, eventData);
}
return segments;
},
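        // Builds the legend entries (count, sum and duration with trend and change) from the selected event overview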
getLegendData: function(context) {
if (!context.state.allEventsProcessed) {
return;
}
var lineLegend = {};
var legendData = [];
var eventsOverview = context.state.selectedEventsOverview;
var labels = context.state.labels;
var count = {};
count.name = labels.count;
count.value = eventsOverview.count.total;
count.trend = eventsOverview.count.trend === "u" ? "up" : "down";
count.percentage = eventsOverview.count.change;
count.tooltip = labels.count;
legendData.push(count);
var sum = {};
sum.name = labels.sum;
sum.value = eventsOverview.sum.total;
sum.trend = eventsOverview.sum.trend === "u" ? "up" : "down";
sum.percentage = eventsOverview.sum.change;
sum.tooltip = labels.sum;
legendData.push(sum);
var dur = {};
dur.name = labels.dur;
dur.value = eventsOverview.dur.total;
dur.trend = eventsOverview.dur.trend === "u" ? "up" : "down";
dur.percentage = eventsOverview.dur.change;
dur.tooltip = labels.dur;
legendData.push(dur);
lineLegend.show = true;
lineLegend.type = "primary";
lineLegend.data = legendData;
return lineLegend;
},
getSelectedEventsOverview: function(context, res) {
return res[context.state.selectedEventName].data;
},
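        // Merges visible events and active event groups into a single list used by the event selector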
getAllEventsList: function(eventsList, groupList) {
var map = eventsList.map || {};
var allEvents = [];
if (eventsList) {
eventsList.list.forEach(function(item) {
if (!map[item] || (map[item] && (map[item].is_visible || map[item].is_visible === undefined))) {
var obj = {
"label": map[item] && map[item].name ? map[item].name : item,
"value": item,
"custom": {
"value": undefined
}
};
allEvents.push(obj);
}
});
}
if (groupList) {
groupList.forEach(function(item) {
if (item.status) {
var obj = {
"label": item.name,
"value": item._id,
"custom": {
"value": "GROUP"
}
};
allEvents.push(obj);
}
});
}
return allEvents;
},
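        // Looks up the group metadata (name, description, display map) when the selected event is an event group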
getGroupData: function(groupData, selectedEventName) {
var description = undefined;
var name = "";
var isGroup = false;
var displayMap = {};
if (selectedEventName.startsWith('[CLY]_group')) {
groupData.every(function(item) {
if (item._id === selectedEventName) {
description = item.description;
name = item.name;
isGroup = true;
displayMap = Object.assign({}, item.display_map);
return false;
}
return true;
});
}
return {
"description": description,
"name": name,
"isGroup": isGroup,
"displayMap": displayMap
};
},
getLabels: function(allEventsData, groupData, selectedEventName) {
if (groupData.isGroup) {
return {
count: groupData.displayMap.c ? groupData.displayMap.c : CV.i18n("events.overview.count"),
sum: groupData.displayMap.s ? groupData.displayMap.s : CV.i18n("events.overview.sum"),
dur: groupData.displayMap.d ? groupData.displayMap.d : CV.i18n("events.overview.duration")
};
}
return {
count: allEventsData && allEventsData.map && allEventsData.map[selectedEventName] ? allEventsData.map[selectedEventName].count : CV.i18n("events.overview.count"),
sum: allEventsData && allEventsData.map && allEventsData.map[selectedEventName] ? allEventsData.map[selectedEventName].sum : CV.i18n("events.overview.sum"),
dur: allEventsData && allEventsData.map && allEventsData.map[selectedEventName] ? allEventsData.map[selectedEventName].dur : CV.i18n("events.overview.duration")
};
},
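        // Builds warning messages when the event key, segmentation or unique segment value limits are reached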
getLimitAlerts: function(context) {
var limitAlert = [];
var allEventsList = context.state.allEventsList;
var limits = context.state.allEventsData.limits;
var eventLimit = {};
var eventSegmentationLimit = {};
var eventSegmentationLimitValue = {};
var availableSegments = context.state.availableSegments.length - 1;
var meta = context.state.selectedEventsData.meta;
var eventsLength = allEventsList.filter(function(item) {
return item.custom.value !== "GROUP";
}).length;
if (eventsLength >= limits.event_limit) {
eventLimit.message = CV.i18n("events.max-event-key-limit", limits.event_limit);
eventLimit.show = true;
limitAlert.push(eventLimit);
}
if (!context.state.selectedEventName.startsWith('[CLY]_group')) {
if (availableSegments >= limits.event_segmentation_limit) {
eventSegmentationLimit.message = CV.i18n("events.max-segmentation-limit", limits.event_segmentation_limit, context.state.allEventsProcessed.eventName);
eventSegmentationLimit.show = true;
limitAlert.push(eventSegmentationLimit);
}
context.state.availableSegments.forEach(function(s) {
if (s !== "segment" && meta[s] && meta[s].length >= limits.event_segmentation_value_limit) {
eventSegmentationLimitValue = {};
eventSegmentationLimitValue.message = CV.i18n("events.max-unique-value-limit", limits.event_segmentation_value_limit, s);
eventSegmentationLimitValue.show = true;
limitAlert.push(eventSegmentationLimitValue);
}
});
}
return limitAlert;
},
getCurrentCategory: function(context) {
if (context.state.allEventsData.map && context.state.allEventsData.map[context.state.selectedEventName]) {
var categoryId = context.state.allEventsData.map[context.state.selectedEventName].category;
if (categoryId && context.state.categoriesMap[categoryId]) {
return context.state.categoriesMap[categoryId];
}
}
return "";
},
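        // Merges previously known segment meta values into the refreshed data so that partial refresh responses do not drop values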
extendMeta: function(prevState, selectedEventsData) {
for (var metaObj in selectedEventsData.meta) {
if (prevState.meta[metaObj] && selectedEventsData.meta[metaObj] && prevState.meta[metaObj].length !== selectedEventsData.meta[metaObj].length) {
selectedEventsData.meta[metaObj] = countlyCommon.union(prevState.meta[metaObj], selectedEventsData.meta[metaObj]);
}
}
}
};
countlyAllEvents.service = {
fetchAllEventsData: function(context) {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r,
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"method": "get_events",
"period": context.state.selectedDatePeriod,
"preventRequestAbort": true
},
dataType: "json",
});
},
fetchAllEventsGroupData: function() {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r,
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"method": "get_event_groups",
"preventRequestAbort": true
},
dataType: "json",
});
},
fetchSelectedEventsData: function(context) {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r,
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"method": "events",
"event": context.state.selectedEventName,
"segmentation": context.state.currentActiveSegmentation === "segment" ? "" : context.state.currentActiveSegmentation,
"period": context.state.selectedDatePeriod,
"preventRequestAbort": true
},
dataType: "json",
});
},
fetchSelectedEventsOverview: function(context) {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r,
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"method": "events",
"events": JSON.stringify([context.state.selectedEventName]),
"period": context.state.selectedDatePeriod,
"timestamp": new Date().getTime(),
"overview": true
},
dataType: "json",
});
},
fetchCategories: function() {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r + '/data-manager/category',
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"preventRequestAbort": true
},
dataType: "json"
});
},
fetchRefreshSelectedEventsData: function(context) {
return CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.r,
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"method": "events",
"event": context.state.selectedEventName,
"segmentation": context.state.currentActiveSegmentation === "segment" ? "" : context.state.currentActiveSegmentation,
"action": "refresh"
},
dataType: "json",
});
}
};
countlyAllEvents.getVuexModule = function() {
var getInitialState = function() {
return {
allEventsData: {},
allEventsGroupData: [],
selectedEventsData: {},
selectedDatePeriod: countlyCommon.getPeriod(),
selectedEventName: undefined,
groupData: {},
currentActiveSegmentation: "segment",
hasSegments: false,
availableSegments: [],
allEventsProcessed: {},
barData: {},
lineChartData: {},
legendData: {},
tableRows: [],
selectedEventsOverview: {},
allEventsList: [],
labels: [],
limitAlerts: [],
categoriesMap: [],
currentCategory: ""
};
};
var allEventsActions = {
fetchAllEventsData: function(context) {
return countlyAllEvents.service.fetchAllEventsData(context)
.then(function(res) {
if (res) {
context.commit("setAllEventsData", res);
if (!context.state.selectedEventName) {
localStorage.setItem("eventKey", res.list[0]);
context.commit('setSelectedEventName', res.list[0]);
}
context.commit("setCurrentCategory", countlyAllEvents.helpers.getCurrentCategory(context));
countlyAllEvents.service.fetchAllEventsGroupData(context)
.then(function(result) {
if (result) {
context.commit("setAllEventsGroupData", result);
context.commit("setAllEventsList", countlyAllEvents.helpers.getAllEventsList(res, result));
context.commit("setGroupData", countlyAllEvents.helpers.getGroupData(result, context.state.selectedEventName));
context.commit("setLabels", countlyAllEvents.helpers.getLabels(res, context.state.groupData, context.state.selectedEventName));
countlyAllEvents.service.fetchSelectedEventsData(context)
.then(function(response) {
if (response) {
context.commit("setSelectedEventsData", response);
context.commit("setAvailableSegments", countlyAllEvents.helpers.getSegments(context, response) || []);
context.commit("setTableRows", countlyAllEvents.helpers.getTableRows(context) || []);
context.commit("setLimitAlerts", countlyAllEvents.helpers.getLimitAlerts(context) || []);
countlyAllEvents.service.fetchSelectedEventsOverview(context)
.then(function(resp) {
if (resp) {
context.commit("setSelectedEventsOverview", countlyAllEvents.helpers.getSelectedEventsOverview(context, resp) || {});
context.commit("setLegendData", countlyAllEvents.helpers.getLegendData(context || {}));
}
});
}
});
}
});
}
});
},
fetchAllEventsGroupData: function(context) {
return countlyAllEvents.service.fetchAllEventsGroupData(context)
.then(function(res) {
if (res) {
context.commit("setAllEventsGroupData", res);
}
});
},
fetchSelectedEventsData: function(context) {
return countlyAllEvents.service.fetchSelectedEventsData(context)
.then(function(res) {
if (res) {
context.commit("setSelectedEventsData", res);
context.commit("setAvailableSegments", countlyAllEvents.helpers.getSegments(context, res) || []);
context.commit("setTableRows", countlyAllEvents.helpers.getTableRows(context) || []);
}
});
},
fetchSelectedDatePeriod: function(context, period) {
context.commit('setSelectedDatePeriod', period);
},
fetchSelectedEventName: function(context, name) {
localStorage.setItem("eventKey", name);
context.commit('setSelectedEventName', name);
},
fetchCurrentActiveSegmentation: function(context, name) {
context.commit('setCurrentActiveSegmentation', name);
},
fetchHasSegments: function(context, hasSegments) {
context.commit('setHasSegments', hasSegments);
},
fetchCategories: function(context) {
countlyAllEvents.service.fetchCategories().then(function(data) {
var map = {};
data.forEach(function(c) {
map[c._id] = c.name;
});
context.commit('setCategoriesMap', map);
});
},
fetchRefreshAllEventsData: function(context) {
return countlyAllEvents.service.fetchAllEventsData(context)
.then(function(res) {
if (res) {
context.commit("setAllEventsData", res);
if (!context.state.selectedEventName) {
localStorage.setItem("eventKey", res.list[0]);
context.commit('setSelectedEventName', res.list[0]);
}
countlyAllEvents.service.fetchAllEventsGroupData(context)
.then(function(result) {
if (result) {
context.commit("setAllEventsGroupData", result);
context.commit("setAllEventsList", countlyAllEvents.helpers.getAllEventsList(res, result));
context.commit("setGroupData", countlyAllEvents.helpers.getGroupData(result, context.state.selectedEventName));
context.commit("setLabels", countlyAllEvents.helpers.getLabels(res, context.state.groupData, context.state.selectedEventName));
countlyAllEvents.service.fetchRefreshSelectedEventsData(context)
.then(function(response) {
if (response) {
var prevState = Object.assign({}, context.state.selectedEventsData);
countlyCommon.extendDbObj(context.state.selectedEventsData, response);
countlyAllEvents.helpers.extendMeta(prevState, context.state.selectedEventsData);
context.commit("setAvailableSegments", countlyAllEvents.helpers.getSegments(context, context.state.selectedEventsData) || []);
context.commit("setTableRows", countlyAllEvents.helpers.getTableRows(context) || []);
context.commit("setLimitAlerts", countlyAllEvents.helpers.getLimitAlerts(context) || []);
countlyAllEvents.service.fetchSelectedEventsOverview(context)
.then(function(resp) {
if (resp) {
context.commit("setSelectedEventsOverview", countlyAllEvents.helpers.getSelectedEventsOverview(context, resp) || {});
context.commit("setLegendData", countlyAllEvents.helpers.getLegendData(context || {}));
}
});
}
});
}
});
}
});
},
};
var allEventsMutations = {
setAllEventsData: function(state, value) {
state.allEventsData = value;
},
setAllEventsList: function(state, value) {
state.allEventsList = value;
},
setAllEventsGroupData: function(state, value) {
state.allEventsGroupData = value;
},
setSelectedEventsData: function(state, value) {
state.selectedEventsData = value;
},
setSelectedDatePeriod: function(state, value) {
state.selectedDatePeriod = value;
},
setSelectedEventName: function(state, value) {
state.selectedEventName = value;
},
setGroupData: function(state, value) {
state.groupData = value;
},
setCurrentActiveSegmentation: function(state, value) {
state.currentActiveSegmentation = value;
},
setHasSegments: function(state, value) {
state.hasSegments = value;
},
setAvailableSegments: function(state, value) {
state.availableSegments = value;
},
setAllEventsProcessed: function(state, value) {
state.allEventsProcessed = value;
},
setBarData: function(state, value) {
state.barData = value;
},
setLineChartData: function(state, value) {
state.lineChartData = value;
},
setLegendData: function(state, value) {
state.legendData = value;
},
setTableRows: function(state, value) {
state.tableRows = value;
},
setSelectedEventsOverview: function(state, value) {
state.selectedEventsOverview = value;
},
setLabels: function(state, value) {
state.labels = value;
},
setLimitAlerts: function(state, value) {
state.limitAlerts = value;
},
setCategoriesMap: function(state, value) {
state.categoriesMap = value;
},
setCurrentCategory: function(state, value) {
state.currentCategory = value;
},
};
var allEventsGetters = {
allEvents: function(_state) {
return _state.allEventsData;
},
allEventsList: function(_state) {
return _state.allEventsList;
},
allEventsGroup: function(_state) {
return _state.allEventsGroupData;
},
selectedEvent: function(_state) {
return _state.selectedEventsData;
},
selectedDatePeriod: function(_state) {
return _state.selectedDatePeriod;
},
selectedEventName: function(_state) {
return _state.selectedEventName;
},
groupData: function(_state) {
return _state.groupData;
},
currentActiveSegmentation: function(_state) {
return _state.currentActiveSegmentation;
},
hasSegments: function(_state) {
return _state.hasSegments;
},
availableSegments: function(_state) {
return _state.availableSegments;
},
allEventsProcessed: function(_state) {
return _state.allEventsProcessed;
},
barData: function(_state) {
return _state.barData;
},
lineChartData: function(_state) {
return _state.lineChartData;
},
legendData: function(_state) {
return _state.legendData;
},
tableRows: function(_state) {
return _state.tableRows;
},
selectedEventsOverview: function(_state) {
return _state.selectedEventsOverview;
},
labels: function(_state) {
return _state.labels;
},
limitAlerts: function(_state) {
return _state.limitAlerts;
},
categoriesMap: function(_state) {
return _state.categoriesMap;
},
currentCategory: function(_state) {
return _state.currentCategory;
}
};
return countlyVue.vuex.Module("countlyAllEvents", {
state: getInitialState,
actions: allEventsActions,
mutations: allEventsMutations,
getters: allEventsGetters,
});
};
}(window.countlyAllEvents = window.countlyAllEvents || {})); | 1 | 14,733 | This will only be available when data-manager is enabled; is there a fallback in case data-manager is disabled? | Countly-countly-server | js
@@ -219,7 +219,7 @@ func (o *deploySvcOpts) generateWorkerServiceRecommendedActions() {
retrieveEnvVarCode := "const eventsQueueURI = process.env.COPILOT_QUEUE_URI"
actionRetrieveEnvVar := fmt.Sprintf(
`Update %s's code to leverage the injected environment variable "COPILOT_QUEUE_URI".
-In JavaScript you can write %s.`,
+ In JavaScript you can write %s.`,
o.name,
color.HighlightCode(retrieveEnvVarCode),
) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/apprunner"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/copilot-cli/internal/pkg/ecs"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/copilot-cli/internal/pkg/aws/identity"
"github.com/aws/aws-sdk-go/aws"
"golang.org/x/mod/semver"
"github.com/aws/copilot-cli/internal/pkg/addon"
awscloudformation "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/ecr"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/aws/tags"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
"github.com/spf13/cobra"
)
const (
fmtForceUpdateSvcStart = "Forcing an update for service %s from environment %s"
fmtForceUpdateSvcFailed = "Failed to force an update for service %s from environment %s: %v.\n"
fmtForceUpdateSvcComplete = "Forced an update for service %s from environment %s.\n"
)
type deployWkldVars struct {
appName string
name string
envName string
imageTag string
resourceTags map[string]string
forceNewUpdate bool
}
type uploadCustomResourcesOpts struct {
uploader customResourcesUploader
newS3Uploader func() (Uploader, error)
}
type deploySvcOpts struct {
deployWkldVars
store store
ws wsSvcDirReader
imageBuilderPusher imageBuilderPusher
unmarshal func([]byte) (manifest.WorkloadManifest, error)
s3 artifactUploader
cmd runner
addons templater
appCFN appResourcesGetter
svcCFN serviceDeployer
newSvcUpdater func(func(*session.Session) serviceUpdater)
sessProvider sessionProvider
envUpgradeCmd actionCommand
newAppVersionGetter func(string) (versionGetter, error)
endpointGetter endpointGetter
snsTopicGetter deployedEnvironmentLister
identity identityService
spinner progress
sel wsSelector
prompt prompter
// cached variables
targetApp *config.Application
targetEnvironment *config.Environment
targetSvc *config.Workload
imageDigest string
buildRequired bool
appEnvResources *stack.AppRegionalResources
rdSvcAlias string
svcUpdater serviceUpdater
recommendedActions []string
subscriptions []manifest.TopicSubscription
uploadOpts *uploadCustomResourcesOpts
}
func newSvcDeployOpts(vars deployWkldVars) (*deploySvcOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("new config store: %w", err)
}
deployStore, err := deploy.NewStore(store)
if err != nil {
return nil, fmt.Errorf("new deploy store: %w", err)
}
ws, err := workspace.New()
if err != nil {
return nil, fmt.Errorf("new workspace: %w", err)
}
prompter := prompt.New()
opts := &deploySvcOpts{
deployWkldVars: vars,
store: store,
ws: ws,
unmarshal: manifest.UnmarshalWorkload,
spinner: termprogress.NewSpinner(log.DiagnosticWriter),
sel: selector.NewWorkspaceSelect(prompter, store, ws),
prompt: prompter,
newAppVersionGetter: func(appName string) (versionGetter, error) {
d, err := describe.NewAppDescriber(appName)
if err != nil {
return nil, fmt.Errorf("new app describer for application %s: %w", appName, err)
}
return d, nil
},
cmd: exec.NewCmd(),
sessProvider: sessions.NewProvider(),
snsTopicGetter: deployStore,
}
opts.uploadOpts = newUploadCustomResourcesOpts(opts)
return opts, err
}
// Validate returns an error if the user inputs are invalid.
func (o *deploySvcOpts) Validate() error {
if o.appName == "" {
return errNoAppInWorkspace
}
if o.name != "" {
if err := o.validateSvcName(); err != nil {
return err
}
}
if o.envName != "" {
if err := o.validateEnvName(); err != nil {
return err
}
}
return nil
}
// Ask prompts the user for any required fields that are not provided.
func (o *deploySvcOpts) Ask() error {
if err := o.askSvcName(); err != nil {
return err
}
if err := o.askEnvName(); err != nil {
return err
}
return nil
}
// Execute builds and pushes the container image for the service, then deploys the service to the environment.
func (o *deploySvcOpts) Execute() error {
o.imageTag = imageTagFromGit(o.cmd, o.imageTag) // Best effort assign git tag.
env, err := targetEnv(o.store, o.appName, o.envName)
if err != nil {
return err
}
o.targetEnvironment = env
app, err := o.store.GetApplication(o.appName)
if err != nil {
return err
}
o.targetApp = app
svc, err := o.store.GetService(o.appName, o.name)
if err != nil {
return fmt.Errorf("get service configuration: %w", err)
}
o.targetSvc = svc
if err := o.configureClients(); err != nil {
return err
}
if err := o.envUpgradeCmd.Execute(); err != nil {
return fmt.Errorf(`execute "env upgrade --app %s --name %s": %v`, o.appName, o.targetEnvironment.Name, err)
}
if err := o.configureContainerImage(); err != nil {
return err
}
addonsURL, err := o.pushAddonsTemplateToS3Bucket()
if err != nil {
return err
}
if err := o.deploySvc(addonsURL); err != nil {
return err
}
return o.logSuccessfulDeployment()
}
func (o *deploySvcOpts) generateWorkerServiceRecommendedActions() {
retrieveEnvVarCode := "const eventsQueueURI = process.env.COPILOT_QUEUE_URI"
actionRetrieveEnvVar := fmt.Sprintf(
`Update %s's code to leverage the injected environment variable "COPILOT_QUEUE_URI".
In JavaScript you can write %s.`,
o.name,
color.HighlightCode(retrieveEnvVarCode),
)
o.recommendedActions = append(o.recommendedActions, actionRetrieveEnvVar)
topicQueueNames := o.buildWorkerQueueNames()
if topicQueueNames == "" {
return
}
retrieveTopicQueueEnvVarCode := fmt.Sprintf("const {%s} = process.env.COPILOT_TOPIC_QUEUE_URIS", topicQueueNames)
actionRetrieveTopicQueues := fmt.Sprintf(
`You can retrieve topic-specific queues by writing
%s.`,
color.HighlightCode(retrieveTopicQueueEnvVarCode),
)
o.recommendedActions = append(o.recommendedActions, actionRetrieveTopicQueues)
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (o *deploySvcOpts) RecommendedActions() []string {
return o.recommendedActions
}
func (o *deploySvcOpts) validateSvcName() error {
names, err := o.ws.ServiceNames()
if err != nil {
return fmt.Errorf("list services in the workspace: %w", err)
}
for _, name := range names {
if o.name == name {
return nil
}
}
return fmt.Errorf("service %s not found in the workspace", color.HighlightUserInput(o.name))
}
func (o *deploySvcOpts) validateEnvName() error {
if _, err := targetEnv(o.store, o.appName, o.envName); err != nil {
return err
}
return nil
}
func targetEnv(s store, appName, envName string) (*config.Environment, error) {
env, err := s.GetEnvironment(appName, envName)
if err != nil {
return nil, fmt.Errorf("get environment %s configuration: %w", envName, err)
}
return env, nil
}
func (o *deploySvcOpts) askSvcName() error {
if o.name != "" {
return nil
}
name, err := o.sel.Service("Select a service in your workspace", "")
if err != nil {
return fmt.Errorf("select service: %w", err)
}
o.name = name
return nil
}
func (o *deploySvcOpts) askEnvName() error {
if o.envName != "" {
return nil
}
name, err := o.sel.Environment("Select an environment", "", o.appName)
if err != nil {
return fmt.Errorf("select environment: %w", err)
}
o.envName = name
return nil
}
func (o *deploySvcOpts) configureClients() error {
defaultSessEnvRegion, err := o.sessProvider.DefaultWithRegion(o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("create ECR session with region %s: %w", o.targetEnvironment.Region, err)
}
envSession, err := o.sessProvider.FromRole(o.targetEnvironment.ManagerRoleARN, o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("assuming environment manager role: %w", err)
}
// ECR client against tools account profile AND target environment region.
repoName := fmt.Sprintf("%s/%s", o.appName, o.name)
registry := ecr.New(defaultSessEnvRegion)
o.imageBuilderPusher, err = repository.New(repoName, registry)
if err != nil {
return fmt.Errorf("initiate image builder pusher: %w", err)
}
o.s3 = s3.New(defaultSessEnvRegion)
o.newSvcUpdater = func(f func(*session.Session) serviceUpdater) {
o.svcUpdater = f(envSession)
}
// CF client against env account profile AND target environment region.
o.svcCFN = cloudformation.New(envSession)
o.endpointGetter, err = describe.NewEnvDescriber(describe.NewEnvDescriberConfig{
App: o.appName,
Env: o.envName,
ConfigStore: o.store,
})
if err != nil {
return fmt.Errorf("initiate env describer: %w", err)
}
addonsSvc, err := addon.New(o.name)
if err != nil {
return fmt.Errorf("initiate addons service: %w", err)
}
o.addons = addonsSvc
// client to retrieve an application's resources created with CloudFormation.
defaultSess, err := o.sessProvider.Default()
if err != nil {
return fmt.Errorf("create default session: %w", err)
}
o.appCFN = cloudformation.New(defaultSess)
cmd, err := newEnvUpgradeOpts(envUpgradeVars{
appName: o.appName,
name: o.targetEnvironment.Name,
})
if err != nil {
return fmt.Errorf("new env upgrade command: %v", err)
}
o.envUpgradeCmd = cmd
// client to retrieve caller identity.
id := identity.New(defaultSess)
o.identity = id
return nil
}
func (o *deploySvcOpts) configureContainerImage() error {
svc, err := o.manifest()
if err != nil {
return err
}
required, err := manifest.ServiceDockerfileBuildRequired(svc)
if err != nil {
return err
}
if !required {
return nil
}
// If it is built from local Dockerfile, build and push to the ECR repo.
buildArg, err := o.dfBuildArgs(svc)
if err != nil {
return err
}
digest, err := o.imageBuilderPusher.BuildAndPush(dockerengine.New(exec.NewCmd()), buildArg)
if err != nil {
return fmt.Errorf("build and push image: %w", err)
}
o.imageDigest = digest
o.buildRequired = true
return nil
}
func (o *deploySvcOpts) dfBuildArgs(svc interface{}) (*dockerengine.BuildArguments, error) {
copilotDir, err := o.ws.CopilotDirPath()
if err != nil {
return nil, fmt.Errorf("get copilot directory: %w", err)
}
return buildArgs(o.name, o.imageTag, copilotDir, svc)
}
func buildArgs(name, imageTag, copilotDir string, unmarshaledManifest interface{}) (*dockerengine.BuildArguments, error) {
type dfArgs interface {
BuildArgs(rootDirectory string) *manifest.DockerBuildArgs
TaskPlatform() (*string, error)
}
mf, ok := unmarshaledManifest.(dfArgs)
if !ok {
return nil, fmt.Errorf("%s does not have required methods BuildArgs() and TaskPlatform()", name)
}
var tags []string
if imageTag != "" {
tags = append(tags, imageTag)
}
args := mf.BuildArgs(filepath.Dir(copilotDir))
platform, err := mf.TaskPlatform()
if err != nil {
return nil, fmt.Errorf("get platform for service: %w", err)
}
return &dockerengine.BuildArguments{
Dockerfile: *args.Dockerfile,
Context: *args.Context,
Args: args.Args,
CacheFrom: args.CacheFrom,
Target: aws.StringValue(args.Target),
Platform: aws.StringValue(platform),
Tags: tags,
}, nil
}
// pushAddonsTemplateToS3Bucket generates the addons template for the service and pushes it to S3.
// If the service doesn't have any addons, it returns the empty string and no errors.
// If the service has addons, it returns the URL of the S3 object storing the addons template.
func (o *deploySvcOpts) pushAddonsTemplateToS3Bucket() (string, error) {
template, err := o.addons.Template()
if err != nil {
var notFoundErr *addon.ErrAddonsNotFound
if errors.As(err, ¬FoundErr) {
// addons doesn't exist for service, the url is empty.
return "", nil
}
return "", fmt.Errorf("retrieve addons template: %w", err)
}
if err := o.retrieveAppResourcesForEnvRegion(); err != nil {
return "", err
}
reader := strings.NewReader(template)
url, err := o.s3.PutArtifact(o.appEnvResources.S3Bucket, fmt.Sprintf(deploy.AddonsCfnTemplateNameFormat, o.name), reader)
if err != nil {
return "", fmt.Errorf("put addons artifact to bucket %s: %w", o.appEnvResources.S3Bucket, err)
}
return url, nil
}
func (o *deploySvcOpts) manifest() (interface{}, error) {
raw, err := o.ws.ReadServiceManifest(o.name)
if err != nil {
return nil, fmt.Errorf("read service %s manifest file: %w", o.name, err)
}
mft, err := o.unmarshal(raw)
if err != nil {
return nil, fmt.Errorf("unmarshal service %s manifest: %w", o.name, err)
}
envMft, err := mft.ApplyEnv(o.envName)
if err != nil {
return nil, fmt.Errorf("apply environment %s override: %s", o.envName, err)
}
return envMft, nil
}
func (o *deploySvcOpts) runtimeConfig(addonsURL string) (*stack.RuntimeConfig, error) {
endpoint, err := o.endpointGetter.ServiceDiscoveryEndpoint()
if err != nil {
return nil, err
}
if !o.buildRequired {
return &stack.RuntimeConfig{
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.resourceTags),
ServiceDiscoveryEndpoint: endpoint,
AccountID: o.targetApp.AccountID,
Region: o.targetEnvironment.Region,
}, nil
}
if err := o.retrieveAppResourcesForEnvRegion(); err != nil {
return nil, err
}
repoURL, ok := o.appEnvResources.RepositoryURLs[o.name]
if !ok {
return nil, &errRepoNotFound{
wlName: o.name,
envRegion: o.targetEnvironment.Region,
appAccountID: o.targetApp.AccountID,
}
}
return &stack.RuntimeConfig{
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.resourceTags),
Image: &stack.ECRImage{
RepoURL: repoURL,
ImageTag: o.imageTag,
Digest: o.imageDigest,
},
ServiceDiscoveryEndpoint: endpoint,
AccountID: o.targetApp.AccountID,
Region: o.targetEnvironment.Region,
}, nil
}
func uploadCustomResources(o *uploadCustomResourcesOpts, appEnvResources *stack.AppRegionalResources) (map[string]string, error) {
s3Client, err := o.newS3Uploader()
if err != nil {
return nil, err
}
urls, err := o.uploader.UploadRequestDrivenWebServiceCustomResources(func(key string, objects ...s3.NamedBinary) (string, error) {
return s3Client.ZipAndUpload(appEnvResources.S3Bucket, key, objects...)
})
if err != nil {
return nil, fmt.Errorf("upload custom resources to bucket %s: %w", appEnvResources.S3Bucket, err)
}
return urls, nil
}
func (o *deploySvcOpts) stackConfiguration(addonsURL string) (cloudformation.StackConfiguration, error) {
mft, err := o.manifest()
if err != nil {
return nil, err
}
rc, err := o.runtimeConfig(addonsURL)
if err != nil {
return nil, err
}
o.newSvcUpdater(func(s *session.Session) serviceUpdater {
return ecs.New(s)
})
var conf cloudformation.StackConfiguration
switch t := mft.(type) {
case *manifest.LoadBalancedWebService:
if o.targetApp.RequiresDNSDelegation() {
var appVersionGetter versionGetter
if appVersionGetter, err = o.newAppVersionGetter(o.appName); err != nil {
return nil, err
}
if err = validateLBSvcAliasAndAppVersion(aws.StringValue(t.Name), t.Alias, o.targetApp, o.envName, appVersionGetter); err != nil {
return nil, err
}
conf, err = stack.NewHTTPSLoadBalancedWebService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
} else {
conf, err = stack.NewLoadBalancedWebService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
}
case *manifest.RequestDrivenWebService:
o.newSvcUpdater(func(s *session.Session) serviceUpdater {
return apprunner.New(s)
})
var caller identity.Caller
caller, err = o.identity.Get()
if err != nil {
return nil, fmt.Errorf("get identity: %w", err)
}
appInfo := deploy.AppInformation{
Name: o.targetEnvironment.App,
DNSName: o.targetApp.Domain,
AccountPrincipalARN: caller.RootUserARN,
}
if t.Alias == nil {
conf, err = stack.NewRequestDrivenWebService(t, o.targetEnvironment.Name, appInfo, *rc)
break
}
o.rdSvcAlias = aws.StringValue(t.Alias)
var (
urls map[string]string
appVersionGetter versionGetter
)
if appVersionGetter, err = o.newAppVersionGetter(o.appName); err != nil {
return nil, err
}
if err = validateRDSvcAliasAndAppVersion(o.name, aws.StringValue(t.Alias), o.envName, o.targetApp, appVersionGetter); err != nil {
return nil, err
}
if err = o.retrieveAppResourcesForEnvRegion(); err != nil {
return nil, err
}
if urls, err = uploadCustomResources(o.uploadOpts, o.appEnvResources); err != nil {
return nil, err
}
conf, err = stack.NewRequestDrivenWebServiceWithAlias(t, o.targetEnvironment.Name, appInfo, *rc, urls)
case *manifest.BackendService:
conf, err = stack.NewBackendService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
case *manifest.WorkerService:
var topics []deploy.Topic
topics, err = o.snsTopicGetter.ListSNSTopics(o.appName, o.envName)
if err != nil {
return nil, fmt.Errorf("get SNS topics for app %s and environment %s: %w", o.appName, o.envName, err)
}
var topicARNs []string
for _, topic := range topics {
topicARNs = append(topicARNs, topic.ARN())
}
type subscriptions interface {
Subscriptions() []manifest.TopicSubscription
}
subscriptionGetter, ok := mft.(subscriptions)
if !ok {
return nil, errors.New("manifest does not have required method Subscriptions")
}
// Cache the subscriptions for later.
o.subscriptions = subscriptionGetter.Subscriptions()
if err = validateTopicsExist(o.subscriptions, topicARNs, o.appName, o.envName); err != nil {
return nil, err
}
conf, err = stack.NewWorkerService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
default:
return nil, fmt.Errorf("unknown manifest type %T while creating the CloudFormation stack", t)
}
if err != nil {
return nil, fmt.Errorf("create stack configuration: %w", err)
}
return conf, nil
}
func (o *deploySvcOpts) deploySvc(addonsURL string) error {
conf, err := o.stackConfiguration(addonsURL)
if err != nil {
return err
}
if err := o.svcCFN.DeployService(os.Stderr, conf, awscloudformation.WithRoleARN(o.targetEnvironment.ExecutionRoleARN)); err != nil {
var errEmptyCS *awscloudformation.ErrChangeSetEmpty
if errors.As(err, &errEmptyCS) {
if o.forceNewUpdate {
return o.forceDeploy()
}
log.Warningf("Set --%s to force an update for the service.\n", forceFlag)
}
return fmt.Errorf("deploy service: %w", err)
}
return nil
}
func (o *deploySvcOpts) forceDeploy() error {
// Force update the service if --force is set and change set is empty.
o.spinner.Start(fmt.Sprintf(fmtForceUpdateSvcStart, color.HighlightUserInput(o.name), color.HighlightUserInput(o.envName)))
if err := o.svcUpdater.ForceUpdateService(o.appName, o.envName, o.name); err != nil {
errLog := fmt.Sprintf(fmtForceUpdateSvcFailed, color.HighlightUserInput(o.name),
color.HighlightUserInput(o.envName), err)
var terr timeoutError
if errors.As(err, &terr) {
errLog = fmt.Sprintf("%s Run %s to check for the fail reason.\n", errLog,
color.HighlightCode(fmt.Sprintf("copilot svc status --name %s --env %s", o.name, o.envName)))
}
o.spinner.Stop(log.Serror(errLog))
return fmt.Errorf("force an update for service %s: %w", o.name, err)
}
o.spinner.Stop(log.Ssuccessf(fmtForceUpdateSvcComplete, color.HighlightUserInput(o.name), color.HighlightUserInput(o.envName)))
return nil
}
func validateLBSvcAliasAndAppVersion(svcName string, aliases *manifest.Alias, app *config.Application, envName string, appVersionGetter versionGetter) error {
if aliases == nil {
return nil
}
aliasList, err := aliases.ToStringSlice()
if err != nil {
return fmt.Errorf(`convert 'http.alias' to string slice: %w`, err)
}
if err := validateAppVersion(app.Name, appVersionGetter); err != nil {
logAppVersionOutdatedError(svcName)
return err
}
for _, alias := range aliasList {
// Alias should be within either env, app, or root hosted zone.
var regEnvHostedZone, regAppHostedZone, regRootHostedZone *regexp.Regexp
var err error
if regEnvHostedZone, err = regexp.Compile(fmt.Sprintf(`^([^\.]+\.)?%s.%s.%s`, envName, app.Name, app.Domain)); err != nil {
return err
}
if regAppHostedZone, err = regexp.Compile(fmt.Sprintf(`^([^\.]+\.)?%s.%s`, app.Name, app.Domain)); err != nil {
return err
}
if regRootHostedZone, err = regexp.Compile(fmt.Sprintf(`^([^\.]+\.)?%s`, app.Domain)); err != nil {
return err
}
var validAlias bool
for _, re := range []*regexp.Regexp{regEnvHostedZone, regAppHostedZone, regRootHostedZone} {
if re.MatchString(alias) {
validAlias = true
break
}
}
if validAlias {
continue
}
log.Errorf(`%s must match one of the following patterns:
- %s.%s.%s,
- <name>.%s.%s.%s,
- %s.%s,
- <name>.%s.%s,
- %s,
- <name>.%s
`, color.HighlightCode("http.alias"), envName, app.Name, app.Domain, envName,
app.Name, app.Domain, app.Name, app.Domain, app.Name,
app.Domain, app.Domain, app.Domain)
return fmt.Errorf(`alias "%s" is not supported in hosted zones managed by Copilot`, alias)
}
return nil
}
func checkUnsupportedRDSvcAlias(alias, envName string, app *config.Application) error {
var regEnvHostedZone, regAppHostedZone *regexp.Regexp
var err error
// Example: subdomain.env.app.domain, env.app.domain
if regEnvHostedZone, err = regexp.Compile(fmt.Sprintf(`^([^\.]+\.)?%s.%s.%s`, envName, app.Name, app.Domain)); err != nil {
return err
}
// Example: subdomain.app.domain, app.domain
if regAppHostedZone, err = regexp.Compile(fmt.Sprintf(`^([^\.]+\.)?%s.%s`, app.Name, app.Domain)); err != nil {
return err
}
if regEnvHostedZone.MatchString(alias) {
return fmt.Errorf("%s is an environment-level alias, which is not supported yet", alias)
}
if regAppHostedZone.MatchString(alias) {
return fmt.Errorf("%s is an application-level alias, which is not supported yet", alias)
}
if alias == app.Domain {
return fmt.Errorf("%s is a root domain alias, which is not supported yet", alias)
}
return nil
}
func validateRDSvcAliasAndAppVersion(svcName, alias, envName string, app *config.Application, appVersionGetter versionGetter) error {
if alias == "" {
return nil
}
if err := validateAppVersion(app.Name, appVersionGetter); err != nil {
logAppVersionOutdatedError(svcName)
return err
}
// Alias should be within root hosted zone.
aliasInvalidLog := fmt.Sprintf(`%s of %s field should match the pattern <subdomain>.%s
Where <subdomain> cannot be the application name.
`, color.HighlightUserInput(alias), color.HighlightCode("http.alias"), app.Domain)
if err := checkUnsupportedRDSvcAlias(alias, envName, app); err != nil {
log.Errorf(aliasInvalidLog)
return err
}
// Example: subdomain.domain
regRootHostedZone, err := regexp.Compile(fmt.Sprintf(`^([^\.]+\.)%s`, app.Domain))
if err != nil {
return err
}
if regRootHostedZone.MatchString(alias) {
return nil
}
log.Errorf(aliasInvalidLog)
return fmt.Errorf("alias is not supported in hosted zones that are not managed by Copilot")
}
func validateAppVersion(appName string, appVersionGetter versionGetter) error {
appVersion, err := appVersionGetter.Version()
if err != nil {
return fmt.Errorf("get version for app %s: %w", appName, err)
}
diff := semver.Compare(appVersion, deploy.AliasLeastAppTemplateVersion)
if diff < 0 {
return fmt.Errorf(`alias is not compatible with application versions below %s`, deploy.AliasLeastAppTemplateVersion)
}
return nil
}
func logAppVersionOutdatedError(name string) {
log.Errorf(`Cannot deploy service %s because the application version is incompatible.
To upgrade the application, please run %s first (see https://aws.github.io/copilot-cli/docs/credentials/#application-credentials).
`, name, color.HighlightCode("copilot app upgrade"))
}
func newUploadCustomResourcesOpts(opts *deploySvcOpts) *uploadCustomResourcesOpts {
return &uploadCustomResourcesOpts{
uploader: template.New(),
newS3Uploader: func() (Uploader, error) {
envRegion := opts.targetEnvironment.Region
sess, err := opts.sessProvider.DefaultWithRegion(opts.targetEnvironment.Region)
if err != nil {
return nil, fmt.Errorf("create session with region %s: %w", envRegion, err)
}
s3Client := s3.New(sess)
return s3Client, nil
},
}
}
func (o *deploySvcOpts) retrieveAppResourcesForEnvRegion() error {
if o.appEnvResources != nil {
return nil
}
resources, err := o.appCFN.GetAppResourcesByRegion(o.targetApp, o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("get application %s resources from region %s: %w", o.targetApp.Name, o.targetEnvironment.Region, err)
}
o.appEnvResources = resources
return nil
}
func (o *deploySvcOpts) logSuccessfulDeployment() error {
type identifier interface {
URI(string) (string, error)
}
var ecsSvcDescriber identifier
var err error
switch o.targetSvc.Type {
case manifest.LoadBalancedWebServiceType:
ecsSvcDescriber, err = describe.NewLBWebServiceDescriber(describe.NewServiceConfig{
App: o.appName,
Svc: o.name,
ConfigStore: o.store,
})
case manifest.RequestDrivenWebServiceType:
ecsSvcDescriber, err = describe.NewRDWebServiceDescriber(describe.NewServiceConfig{
App: o.appName,
Svc: o.name,
ConfigStore: o.store,
})
case manifest.BackendServiceType:
ecsSvcDescriber, err = describe.NewBackendServiceDescriber(describe.NewServiceConfig{
App: o.appName,
Svc: o.name,
ConfigStore: o.store,
})
case manifest.WorkerServiceType:
o.generateWorkerServiceRecommendedActions()
log.Successf("Deployed %s.\n", color.HighlightUserInput(o.name))
return nil
default:
err = errors.New("unexpected service type")
}
if err != nil {
return fmt.Errorf("create describer for service type %s: %w", o.targetSvc.Type, err)
}
uri, err := ecsSvcDescriber.URI(o.targetEnvironment.Name)
if err != nil {
return fmt.Errorf("get uri for environment %s: %w", o.targetEnvironment.Name, err)
}
switch o.targetSvc.Type {
case manifest.BackendServiceType:
msg := fmt.Sprintf("Deployed %s.\n", color.HighlightUserInput(o.name))
if uri != describe.BlankServiceDiscoveryURI {
msg = fmt.Sprintf("Deployed %s, its service discovery endpoint is %s.\n", color.HighlightUserInput(o.name), color.HighlightResource(uri))
}
log.Success(msg)
case manifest.RequestDrivenWebServiceType:
log.Successf("Deployed %s, you can access it at %s.\n", color.HighlightUserInput(o.name), color.HighlightResource(uri))
if o.rdSvcAlias != "" {
log.Infof(`The validation process for https://%s can take more than 15 minutes.
Please visit %s to check the validation status.
`, o.rdSvcAlias, color.Emphasize("https://console.aws.amazon.com/apprunner/home"))
}
default:
log.Successf("Deployed %s, you can access it at %s.\n", color.HighlightUserInput(o.name), color.HighlightResource(uri))
}
return nil
}
func (o *deploySvcOpts) buildWorkerQueueNames() string {
sb := new(strings.Builder)
first := true
for _, subscription := range o.subscriptions {
if subscription.Queue == nil {
continue
}
topicSvc := template.StripNonAlphaNumFunc(subscription.Service)
topicName := template.StripNonAlphaNumFunc(subscription.Name)
subName := fmt.Sprintf("%s%sEventsQueue", topicSvc, strings.Title(topicName))
if first {
sb.WriteString(subName)
first = false
} else {
sb.WriteString(fmt.Sprintf(", %s", subName))
}
}
return sb.String()
}
// buildSvcDeployCmd builds the `svc deploy` subcommand.
func buildSvcDeployCmd() *cobra.Command {
vars := deployWkldVars{}
cmd := &cobra.Command{
Use: "deploy",
Short: "Deploys a service to an environment.",
Long: `Deploys a service to an environment.`,
Example: `
Deploys a service named "frontend" to a "test" environment.
/code $ copilot svc deploy --name frontend --env test
Deploys a service with additional resource tags.
/code $ copilot svc deploy --resource-tags source/revision=bb133e7,deployment/initiator=manual`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newSvcDeployOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Execute(); err != nil {
return err
}
if len(opts.RecommendedActions()) > 0 {
log.Infoln("Recommended follow-up actions:")
for _, action := range opts.RecommendedActions() {
log.Infof("- %s\n", action)
}
}
return nil
}),
}
cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", svcFlagDescription)
cmd.Flags().StringVarP(&vars.envName, envFlag, envFlagShort, "", envFlagDescription)
cmd.Flags().StringVar(&vars.imageTag, imageTagFlag, "", imageTagFlagDescription)
cmd.Flags().StringToStringVar(&vars.resourceTags, resourceTagsFlag, nil, resourceTagsFlagDescription)
cmd.Flags().BoolVar(&vars.forceNewUpdate, forceFlag, false, forceFlagDescription)
return cmd
}
| 1 | 19,108 | Do you think we want to move the call to `generateWorkerServiceRecommendedActions` inside `RecommendedActions()`? | aws-copilot-cli | go
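A minimal sketch of what that suggestion could look like (hypothetical; in the file above, generateWorkerServiceRecommendedActions is currently invoked from logSuccessfulDeployment for worker services):

// Hypothetical variant of RecommendedActions per the review suggestion above: build the
// worker-service follow-up actions lazily when they are requested, rather than during
// logSuccessfulDeployment. The len() guard avoids appending the same actions twice.
func (o *deploySvcOpts) RecommendedActions() []string {
	if o.targetSvc != nil && o.targetSvc.Type == manifest.WorkerServiceType && len(o.recommendedActions) == 0 {
		o.generateWorkerServiceRecommendedActions()
	}
	return o.recommendedActions
}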
@@ -3277,7 +3277,7 @@ bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool
VkQueryResultFlags flags) const {
if (disabled.query_validation) return false;
bool skip = false;
- skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-00814", "VUID-vkGetQueryPoolResults-flags-00815", stride,
+ skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateGetQueryPoolResultsFlags(queryPool, flags);
skip |= ValidateGetQueryPoolResultsQueries(queryPool, firstQuery, queryCount); | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: John Zulauf <[email protected]>
* Author: Shannon McPherson <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
// Array of command names indexed by CMD_TYPE enum
static const std::array<const char *, CMD_RANGE_SIZE> command_name_list = {{VUID_CMD_NAME_LIST}};
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
hash_util::HashCombiner hc;
// The set number is integral to the CompatDef's distinctiveness
hc << set << push_constant_ranges.get();
const auto &descriptor_set_layouts = *set_layouts_id.get();
for (uint32_t i = 0; i <= set; i++) {
hc << descriptor_set_layouts[i].get();
}
return hc.Value();
}
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
return false;
}
if (set_layouts_id == other.set_layouts_id) {
// if it's the same set_layouts_id, then *any* subset will match
return true;
}
// They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
const auto &descriptor_set_layouts = *set_layouts_id.get();
assert(set < descriptor_set_layouts.size());
const auto &other_ds_layouts = *other.set_layouts_id.get();
assert(set < other_ds_layouts.size());
for (uint32_t i = 0; i <= set; i++) {
if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
return false;
}
}
return true;
}
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// Get the global maps of pending releases
const GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) const {
return qfo_release_image_barrier_map;
}
const GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) const {
return qfo_release_buffer_barrier_map;
}
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
return qfo_release_image_barrier_map;
}
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
return qfo_release_buffer_barrier_map;
}
ImageSubresourceLayoutMap::InitialLayoutState::InitialLayoutState(const CMD_BUFFER_STATE &cb_state,
const IMAGE_VIEW_STATE *view_state)
: image_view(VK_NULL_HANDLE), aspect_mask(0), label(cb_state.debug_label) {
if (view_state) {
image_view = view_state->image_view;
aspect_mask = view_state->create_info.subresourceRange.aspectMask;
}
}
std::string FormatDebugLabel(const char *prefix, const LoggingLabel &label) {
if (label.Empty()) return std::string();
std::string out;
string_sprintf(&out, "%sVkDebugUtilsLabel(name='%s' color=[%g, %g %g, %g])", prefix, label.name.c_str(), label.color[0],
label.color[1], label.color[2], label.color[3]);
return out;
}
// the ImageLayoutMap implementation bakes in the number of valid aspects -- we have to choose the correct one at construction time
template <uint32_t kThreshold>
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactoryByAspect(const IMAGE_STATE &image_state) {
ImageSubresourceLayoutMap *map = nullptr;
switch (image_state.full_range.aspectMask) {
case VK_IMAGE_ASPECT_COLOR_BIT:
map = new ImageSubresourceLayoutMapImpl<ColorAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_DEPTH_BIT:
map = new ImageSubresourceLayoutMapImpl<DepthAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_STENCIL_BIT:
map = new ImageSubresourceLayoutMapImpl<StencilAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
map = new ImageSubresourceLayoutMapImpl<DepthStencilAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT:
map = new ImageSubresourceLayoutMapImpl<Multiplane2AspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT:
map = new ImageSubresourceLayoutMapImpl<Multiplane3AspectTraits, kThreshold>(image_state);
break;
}
    assert(map); // We shouldn't be able to get a null here unless the traits cases are incomplete
return std::unique_ptr<ImageSubresourceLayoutMap>(map);
}
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) {
std::unique_ptr<ImageSubresourceLayoutMap> map;
    const uint32_t kAlwaysDenseLimit = 16; // About a cacheline on desktop architectures
if (image_state.full_range.layerCount <= kAlwaysDenseLimit) {
// Create a dense row map
map = LayoutMapFactoryByAspect<0>(image_state);
} else {
// Create an initially sparse row map
map = LayoutMapFactoryByAspect<kAlwaysDenseLimit>(image_state);
}
return map;
}
// The const variant only needs the image as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) {
auto it = cb_state->image_layout_map.find(image);
if (it == cb_state->image_layout_map.cend()) {
return nullptr;
}
return it->second.get();
}
// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) {
auto it = cb_state->image_layout_map.find(image_state.image);
if (it == cb_state->image_layout_map.end()) {
// Empty slot... fill it in.
auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state)));
assert(insert_pair.second);
ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get();
assert(new_map);
return new_map;
}
return it->second.get();
}
// Tracks the number of commands recorded in a command buffer.
void CoreChecks::IncrementCommandCount(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->commandCount++;
}
// For the given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
bool CoreChecks::VerifyBoundMemoryIsValid(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *api_name,
const char *error_code) const {
bool result = false;
auto type_name = object_string[typed_handle.type];
if (VK_NULL_HANDLE == mem) {
result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle,
error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
} else if (MEMORY_UNBOUND == mem) {
result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle,
error_code,
"%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
"prior to this operation.",
api_name, report_data->FormatHandle(typed_handle).c_str());
}
return result;
}
// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
bool result = false;
if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
if (image_state->bind_swapchain == VK_NULL_HANDLE) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
"includes VkBindImageMemorySwapchainInfoKHR.",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str());
} else if (image_state->create_from_swapchain != image_state->bind_swapchain) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(image_state->bind_swapchain).c_str());
}
} else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(image_state->binding.mem, VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage),
api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
const char *error_code) const {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(buffer_state->binding.mem,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->binding.mem,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV),
api_name, error_code);
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding->sparse) {
const char *error_code = "VUID-vkBindImageMemory-image-01045";
const char *handle_type = "IMAGE";
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
handle_type = "BUFFER";
} else {
assert(typed_handle.type == kVulkanObjectTypeImage);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind %s to %s which was created with sparse memory flags "
"(VK_%s_CREATE_SPARSE_*_BIT).",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
handle_type);
}
const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
if (mem_info) {
const DEVICE_MEMORY_STATE *prev_binding = ValidationStateTracker::GetDevMemState(mem_binding->binding.mem);
if (prev_binding) {
const char *error_code = "VUID-vkBindImageMemory-image-01044";
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
assert(typed_handle.type == kVulkanObjectTypeImage);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind %s to %s which has already been bound to %s.", apiName,
report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
report_data->FormatHandle(prev_binding->mem).c_str());
} else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind %s to %s which was previous bound to memory that has "
"since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
}
}
}
return skip;
}
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const char *error_code, bool optional = false) const {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
bool CoreChecks::ValidateQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name,
const char *array_parameter_name, const char *unique_error_code,
const char *valid_error_code, bool optional = false) const {
bool skip = false;
if (queue_families) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), unique_error_code, "%s: %s (=%" PRIu32 ") is not unique within %s array.",
cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
skip |= ValidateDeviceQueueFamily(queue_families[i], cmd_name, parameter_name.c_str(), valid_error_code, optional);
}
}
}
return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, const char *fail_msg,
const char *msg_code) const {
if (!(pNode->status & status_mask)) {
return log_msg(report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pNode->commandBuffer),
msg_code, "%s: %s..", report_data->FormatHandle(pNode->commandBuffer).c_str(), fail_msg);
}
return false;
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
const char *msg_code) const {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic line width state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pRasterizationState &&
(pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blendConstantsEnabled) {
result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic blend constants state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
const auto *line_state =
lvl_find_in_chain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext);
if (line_state && line_state->stippledLineEnable) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic line stipple state not set for this command buffer", msg_code);
}
}
return result;
}
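// Log a render pass compatibility error for a mismatched attachment pair between two render passes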
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *msg, const char *caller, const char *error_code) const {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
"compatible with %u: %s.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
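// For render pass compatibility, the attachments at the given indices must either both be unused or match in format,
// sample count, and flags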
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state,
uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
const char *error_code) const {
bool skip = false;
const auto &primaryPassCI = rp1_state->createInfo;
const auto &secondaryPassCI = rp2_state->createInfo;
if (primaryPassCI.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondaryPassCI.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different formats.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different samples.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different flags.", caller, error_code);
}
return skip;
}
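// For the given subpass index, verify that the input, color, resolve, and depth/stencil attachments of the two render
// passes are compatible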
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, const char *error_code) const {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
if (rp1_state->createInfo.subpassCount > 1) {
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
return skip;
}
// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
// will then feed into this function.
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
const char *error_code) const {
bool skip = false;
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
"%s with a subpassCount of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(),
rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
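// Stream the indices of the set bits in 'bits' as a comma-separated list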
static void ListBits(std::ostream &s, uint32_t bits) {
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline, const char *caller) const {
bool skip = false;
const auto ¤t_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if ((current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) ||
(current_vtx_bfr_binding_info[vertex_binding].buffer == VK_NULL_HANDLE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
"%s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
(current_vtx_bfr_binding_info[vertex_binding].buffer != VK_NULL_HANDLE)) {
const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
// Use 1 as vertex/instance index to use buffer stride as well
const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(current_vtx_bfr_binding_info[vertex_binding].buffer),
kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
"Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
" from %s and vertex %s.",
i, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer).c_str());
}
}
}
} else {
if ((!current_vtx_bfr_binding_info.empty()) && (!pCB->vertex_buffer_used)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
"Vertex buffers are bound to %s but no vertex buffers are attached to %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(state.pipeline_state->pipeline).c_str());
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled or there is no viewport.
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState) {
bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
if (dynViewport) {
const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
if (missingViewportMask) {
std::stringstream ss;
ss << "Dynamic viewport(s) ";
ListBits(ss, missingViewportMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
if (dynScissor) {
const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
if (missingScissorMask) {
std::stringstream ss;
ss << "Dynamic scissor(s) ";
ListBits(ss, missingScissorMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED)
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch,
"Num samples mismatch! At draw-time in %s with %u samples while current %s w/ "
"%u samples!",
report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
}
} else {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass,
"No active render pass found at draw-time in %s!", report_data->FormatHandle(pPipeline->pipeline).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
// Error codes for renderpass and subpass mismatches
auto rp_error = "VUID-vkCmdDraw-renderPass-02684", sp_error = "VUID-vkCmdDraw-subpass-02685";
switch (cmd_type) {
case CMD_DRAWINDEXED:
rp_error = "VUID-vkCmdDrawIndexed-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexed-subpass-02685";
break;
case CMD_DRAWINDIRECT:
rp_error = "VUID-vkCmdDrawIndirect-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndirect-subpass-02685";
break;
case CMD_DRAWINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-02685";
break;
case CMD_DRAWINDEXEDINDIRECT:
rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-02685";
break;
case CMD_DRAWINDEXEDINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-02685";
break;
case CMD_DRAWMESHTASKSNV:
rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02685";
break;
case CMD_DRAWMESHTASKSINDIRECTNV:
rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02685";
break;
case CMD_DRAWMESHTASKSINDIRECTCOUNTNV:
rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02685";
break;
default:
assert(CMD_DRAW == cmd_type);
break;
}
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass, "pipeline state object",
pPipeline->rp_state.get(), caller, rp_error);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream errorStr;
        errorStr << report_data->FormatHandle(pipeline_layout->layout) << " only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = errorStr.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex].get();
return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg);
}
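// Look up the command-specific "*-None-02697" VUID used when the bound pipeline layout is not compatible for a descriptor set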
static const char *string_VuidNotCompatibleForSet(CMD_TYPE cmd_type) {
const static std::map<CMD_TYPE, const char *> incompatible_for_set_vuid = {
{CMD_DISPATCH, "VUID-vkCmdDispatch-None-02697"},
{CMD_DISPATCHINDIRECT, "VUID-vkCmdDispatchIndirect-None-02697"},
{CMD_DRAW, "VUID-vkCmdDraw-None-02697"},
{CMD_DRAWINDEXED, "VUID-vkCmdDrawIndexed-None-02697"},
{CMD_DRAWINDEXEDINDIRECT, "VUID-vkCmdDrawIndexedIndirect-None-02697"},
{CMD_DRAWINDEXEDINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndexedIndirectCountKHR-None-02697"},
{CMD_DRAWINDIRECT, "VUID-vkCmdDrawIndirect-None-02697"},
{CMD_DRAWINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndirectCountKHR-None-02697"},
{CMD_DRAWMESHTASKSINDIRECTCOUNTNV, "VUID-vkCmdDrawMeshTasksIndirectCountNV-None-02697"},
{CMD_DRAWMESHTASKSINDIRECTNV, "VUID-vkCmdDrawMeshTasksIndirectNV-None-02697"},
{CMD_DRAWMESHTASKSNV, "VUID-vkCmdDrawMeshTasksNV-None-02697"},
// Not implemented on this path...
// { CMD_DRAWDISPATCHBASE, "VUID-vkCmdDispatchBase-None-02697" },
// { CMD_DRAWINDIRECTBYTECOUNTEXT, "VUID-vkCmdDrawIndirectByteCountEXT-None-02697"},
{CMD_TRACERAYSNV, "VUID-vkCmdTraceRaysNV-None-02697"},
};
auto find_it = incompatible_for_set_vuid.find(cmd_type);
if (find_it == incompatible_for_set_vuid.cend()) {
assert(find_it != incompatible_for_set_vuid.cend());
return "BAD VUID -- Unknown Command Type";
}
return find_it->second;
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code,
const char *state_err_code) const {
const auto last_bound_it = cb_node->lastBound.find(bind_point);
const PIPELINE_STATE *pPipe = nullptr;
if (last_bound_it != cb_node->lastBound.cend()) {
pPipe = last_bound_it->second.pipeline_state;
}
if (nullptr == pPipe) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), pipe_err_code,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
auto const &state = last_bound_it->second;
// First check flag states
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result = ValidateDrawStateFlags(cb_node, pPipe, indexed, state_err_code);
// Now complete other state checks
string errorString;
auto const &pipeline_layout = pPipe->pipeline_layout.get();
// Check if the current pipeline is compatible for the maximum used set with the bound sets.
if (pPipe->active_slots.size() > 0 && !CompatForSet(pPipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipe->pipeline), string_VuidNotCompatibleForSet(cmd_type),
"%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
" with bound descriptor sets, last bound with %s",
command_name_list[cmd_type], report_data->FormatHandle(pPipe->pipeline).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str(), pPipe->max_active_slot,
report_data->FormatHandle(state.pipeline_layout).c_str());
}
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
        // If a valid set is not bound, log an error
if ((state.per_set.size() <= setIndex) || (!state.per_set[setIndex].bound_descriptor_set)) {
result |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound,
"%s uses set #%u but that set is not bound.", report_data->FormatHandle(pPipe->pipeline).c_str(), setIndex);
} else if (!VerifySetLayoutCompatibility(report_data, state.per_set[setIndex].bound_descriptor_set, pipeline_layout,
setIndex, errorString)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet setHandle = state.per_set[setIndex].bound_descriptor_set->GetSet();
result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s bound as set #%u is not compatible with overlapping %s due to: %s",
report_data->FormatHandle(setHandle).c_str(), setIndex,
report_data->FormatHandle(pipeline_layout->layout).c_str(), errorString.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[setIndex].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
                // for images, as the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pPipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[setIndex].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[setIndex].validated_set != descriptor_set ||
state.per_set[setIndex].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled.image_layout_validation &&
state.per_set[setIndex].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
bool need_validate = descriptor_set_changed ||
// Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_validate) {
bool success;
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only validate the bindings that haven't already been validated
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
std::inserter(delta_reqs, delta_reqs.begin()));
success = ValidateDrawState(descriptor_set, delta_reqs, state.per_set[setIndex].dynamicOffsets, cb_node,
function, &err_str);
} else {
success = ValidateDrawState(descriptor_set, binding_req_map, state.per_set[setIndex].dynamicOffsets,
cb_node, function, &err_str);
}
if (!success) {
auto set = descriptor_set->GetSet();
result |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated,
"%s bound as set #%u encountered the following validation error at %s time: %s",
report_data->FormatHandle(set).c_str(), setIndex, function, err_str.c_str());
}
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pPipe, function);
return result;
}
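// Pipeline validation performed while holding the lock (cf. ValidatePipelineUnlocked below): verify that a derivative
// pipeline correctly references a base pipeline that was created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT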
bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *pBasePipeline = nullptr;
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
// This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
// "VUID-VkGraphicsPipelineCreateInfo-flags-00725"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
} else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
} else {
pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
pBasePipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle);
}
if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT look up objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
subpass_desc = nullptr;
}
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines(): %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(), pPipeline->graphicsPipelineCI.subpass,
subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const pAttachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
sizeof(pAttachments[0]))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.");
break;
}
}
}
}
if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required.");
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).");
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo State: Vertex Shader required.");
}
}
if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo State: Mesh Shader not supported.");
}
if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo State: Task Shader not supported.");
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
if (!has_control && has_eval) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo State: Missing pInputAssemblyState.");
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.");
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState) {
if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.");
}
}
if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"topology is %s and geometry shaders feature is not enabled. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"topology is %s and tessellation shaders feature is not enabled. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!enabled_features.core.depthClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidFeature,
"vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
(!enabled_features.core.alphaToOne)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.");
} else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
(!enabled_features.core.depthBounds)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.");
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.");
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo State: Missing pVertexInputState.");
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
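    // Validate pMultisampleState rasterizationSamples against the sample counts of the subpass attachments, including the
    // mixed-sample extensions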
if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) {
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accumColorSamples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
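        // With VK_AMD_mixed_attachment_samples, rasterizationSamples must equal the maximum sample count among the subpass
        // attachments when rasterization is enabled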
if (device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex,
string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
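        // With VK_NV_framebuffer_mixed_samples, validate rasterizationSamples against the depth/stencil and color attachment
        // sample counts, and check the coverage modulation table size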
if (device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accumColorSamples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
const bool ds_test_enabled =
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (pPipeline->graphicsPipelineCI.pMultisampleState) {
if ((raster_samples > subpass_color_samples) &&
(pPipeline->graphicsPipelineCI.pMultisampleState->sampleShadingEnable == VK_TRUE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
const auto *coverage_modulation_state = lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(
pPipeline->graphicsPipelineCI.pMultisampleState->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device),
"VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
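        // With VK_NV_fragment_coverage_to_color, coverageToColorLocation must reference a valid color attachment with a
        // single-component integer format (R8/R16/R32 UINT or SINT)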
if (device_extensions.vk_nv_fragment_coverage_to_color) {
const auto coverage_to_color_state =
lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
string_sprintf(&error_detail, "references an attachment with an invalid format (%s).",
string_VkFormat(color_attachment.format));
break;
}
} else {
string_sprintf(&error_detail,
"references an invalid attachment. The subpass pColorAttachments[%" PRIu32
"].attachment has the value "
"VK_ATTACHMENT_UNUSED.",
coverage_to_color_state->coverageToColorLocation);
}
} else {
string_sprintf(&error_detail,
"references an non-existing attachment since the subpass colorAttachmentCount is %" PRIu32 ".",
subpass_desc->colorAttachmentCount);
}
if (!attachment_is_valid) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device),
"VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
}
return skip;
}
// The block of code below deals specifically with managing/tracking descriptor sets (DSs)
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
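// Note: a descriptor set's in_use flag is set while a command buffer that binds it is pending execution, so such a
// set is also flagged below.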
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const {
if (disabled.idle_descriptor_set) return false;
bool skip = false;
auto set_node = setMap.find(set);
if (set_node == setMap.end()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
"Cannot call %s() on %s that has not been allocated.", func_str, report_data->FormatHandle(set).c_str());
} else {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on %s that is in use by a command buffer.", func_str,
report_data->FormatHandle(set).c_str());
}
}
return skip;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
const char *error_code) const {
auto pool = cb_node->command_pool.get();
if (pool) {
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
required_flags_string.c_str());
}
}
return false;
}
static char const *GetCauseStr(VulkanTypedHandle obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *cause_str = GetCauseStr(obj);
string VUID;
string_sprintf(&VUID, "%s-%s", kVUID_Core_DrawState_InvalidCommandBuffer, object_string[obj.type]);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), VUID.c_str(),
"You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Autogenerated as part of the vk_validation_error_message.h codegen
static const std::array<const char *, CMD_RANGE_SIZE> must_be_recording_list = {{VUID_MUST_BE_RECORDING_LIST}};
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) const {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(cb_state, cmd);
case CB_INVALID_COMPLETE:
case CB_INVALID_INCOMPLETE:
return ReportInvalidCommandBuffer(cb_state, caller_name);
default:
assert(cmd != CMD_NONE);
const auto error = must_be_recording_list[cmd];
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), error,
"You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
}
}
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type,
uint64_t VUID_handle, const char *VUID) const {
bool skip = false;
uint32_t count = 1 << physical_device_count;
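    // Example: with physical_device_count == 2, count is 0x4, so any deviceMask with bits set above 0x3 is rejected.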
if (count <= deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is invaild. Physical device count is %" PRIu32 ".", deviceMask,
physical_device_count);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) const {
bool skip = false;
if (deviceMask == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask,
report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), pCB->active_render_pass_device_mask);
}
return skip;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool inside = false;
if (pCB->activeRenderPass) {
inside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode, "%s: It is invalid to issue this call inside an active %s.",
apiName, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
apiName);
}
return outside;
}
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
bool skip = false;
if (requested_queue_family >= pd_state->queue_family_known_count) {
const char *conditional_ext_cmd =
instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
std::to_string(pd_state->queue_family_known_count).c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
const VkDeviceQueueCreateInfo *infos) const {
bool skip = false;
std::unordered_set<uint32_t> queue_family_set;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice", queue_family_var_name.c_str());
if (queue_family_set.insert(requested_queue_family).second == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
queue_family_var_name.c_str(), requested_queue_family);
}
        // If the queue family is known at this point in time, verify that the requested queue count is available
if (requested_queue_family < pd_state->queue_family_known_count) {
const auto requested_queue_count = infos[i].queueCount;
const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
// spec guarantees at least one queue for each queue family
const uint32_t available_queue_count =
queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
if (requested_queue_count > available_queue_count) {
const std::string count_note =
queue_family_has_props
? "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
: "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
bool skip = false;
auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
if (!pd_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else {
skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
}
return skip;
}
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
// would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
core_checks->SetSetImageViewInitialLayoutCallback(
[core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout);
});
}
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
imageSubresourceMap.clear();
imageLayoutMap.clear();
StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
}
// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id.
// Similarly for mesh and task shaders.
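// For example, a stageMask that includes VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT on a device created without the
// geometryShader feature is reported with geo_error_id.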
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id,
const char *tess_error_id, const char *mesh_error_id,
const char *task_error_id) const {
bool skip = false;
if (!enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
"geometryShader feature enabled.",
caller);
}
if (!enabled_features.core.tessellationShader &&
(stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
"VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
"tessellationShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, mesh_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, task_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.",
caller);
}
return skip;
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
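// The loop below walks a worklist of queues: starting from initial_queue/initial_seq, each submission's wait
// semaphores may pull additional queues (up to the signaling sequence number) into the validation, and a queue is
// only re-visited when its target sequence number grows.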
bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const {
bool skip = false;
// sequence number we want to validate up to, per queue
std::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
std::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs;
std::vector<const QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const {
auto fence_state = GetFenceState(fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const CMD_BUFFER_STATE *pCB, int current_submit_count) const {
bool skip = false;
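    // A command buffer that is already in flight, or that appears more than once in the current submission, must have
    // been begun with VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.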
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
"VUID-vkQueueSubmit-pCommandBuffers-00071", "%s is already in use and is not marked for simultaneous use.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
return skip;
}
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
const char *vu_id) const {
bool skip = false;
if (disabled.command_buffer_state) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
"times.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(cb_state, call_source);
break;
case CB_NEW:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)(cb_state->commandBuffer), vu_id,
"%s used in the call to %s is unrecorded and contains no commands.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
case CB_RECORDING:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on %s before this call to %s!",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex,
uint32_t count, const uint32_t *indices) const {
bool found = false;
bool skip = false;
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object.type], object.handle,
kVUID_Core_DrawState_InvalidQueueFamily,
"vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
"this queue family %d.",
report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(),
queueFamilyIndex);
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
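// Also verify that any bound image or buffer created with VK_SHARING_MODE_CONCURRENT lists the submitting queue's
// family in its pQueueFamilyIndices.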
bool CoreChecks::ValidateQueueFamilyIndices(const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
bool skip = false;
auto pPool = pCB->command_pool.get();
auto queue_state = GetQueueState(queue);
if (pPool && queue_state) {
if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074",
"vkQueueSubmit: Primary %s created in queue family %d is being submitted on %s "
"from queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex,
report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (const auto &object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>());
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>());
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
// Validate that a submitted command buffer held the performance profiling lock
// while recording commands, if it contains performance queries.
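// (The profiling lock is acquired with vkAcquireProfilingLockKHR and must cover the entire recording of any command
// buffer that contains performance queries.)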
bool CoreChecks::ValidatePerformanceQueries(const CMD_BUFFER_STATE *pCB, VkQueue queue, VkQueryPool &first_query_pool,
uint32_t counterPassIndex) const {
bool skip = false;
bool different_pools = false;
bool indexed_different_pool = false;
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
for (const auto &secondaryCB : pCB->linkedCommandBuffers)
skip |= ValidatePerformanceQueries(secondaryCB, queue, first_query_pool, counterPassIndex);
}
for (const auto &query : pCB->startedQueries) {
const auto query_pool_state = GetQueryPoolState(query.pool);
if (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) continue;
if (counterPassIndex >= query_pool_state->n_performance_passes) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
"Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", counterPassIndex,
query_pool_state->n_performance_passes, report_data->FormatHandle(query.pool).c_str());
}
if (!pCB->performance_lock_acquired || pCB->performance_lock_released) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-03220",
"Commandbuffer %s was submitted and contains a performance query but the"
"profiling lock was not held continuously throughout the recording of commands.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
if (query_pool_state->has_perf_scope_command_buffer && (pCB->commandCount - 1) != query.endCommandIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdEndQuery-queryPool-03227",
"vkCmdEndQuery: Query pool %s was created with a counter of scope"
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
"command in the command buffer %s.",
report_data->FormatHandle(query.pool).c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str());
}
if (first_query_pool != VK_NULL_HANDLE) {
if (query_pool_state->pool != first_query_pool) {
different_pools = true;
indexed_different_pool = query.indexed;
}
} else
first_query_pool = query_pool_state->pool;
}
if (different_pools && !enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer),
indexed_different_pool ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
"Commandbuffer %s contains more than one performance query pool but "
"performanceCounterMultipleQueryPools is not enabled.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
return skip;
}
bool CoreChecks::ValidatePrimaryCommandBufferState(const CMD_BUFFER_STATE *pCB, int current_submit_count,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-VkSubmitInfo-pCommandBuffers-00075",
"Command buffer %s was included in the pCommandBuffers array of QueueSubmit but was allocated with "
"VK_COMMAND_BUFFER_LEVEL_SECONDARY.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
} else {
for (auto pSubCB : pCB->linkedCommandBuffers) {
skip |= ValidateQueuedQFOTransfers(pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
"VUID-vkQueueSubmit-pCommandBuffers-00073",
"%s was submitted with secondary %s but that buffer has subsequently been bound to "
"primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(pSubCB->commandBuffer).c_str(),
report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str());
}
}
}
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count);
skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072");
return skip;
}
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence) const {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
"%s is already in use by another submission.", report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
"%s submitted in SIGNALED state. Fences must be reset before being submitted",
report_data->FormatHandle(pFence->fence).c_str());
}
}
return skip;
}
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondaryCmdBuffer);
RecordQueuedQFOTransfers(secondaryCmdBuffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
bool CoreChecks::ValidateSemaphoresForSubmit(VkQueue queue, const VkSubmitInfo *submit,
unordered_set<VkSemaphore> *unsignaled_sema_arg,
unordered_set<VkSemaphore> *signaled_sema_arg,
unordered_set<VkSemaphore> *internal_sema_arg,
unordered_map<VkSemaphore, std::set<uint64_t>> *timeline_values_arg) const {
bool skip = false;
auto &signaled_semaphores = *signaled_sema_arg;
auto &unsignaled_semaphores = *unsignaled_sema_arg;
auto &internal_semaphores = *internal_sema_arg;
auto &timeline_values = *timeline_values_arg;
unordered_map<VkSemaphore, std::set<uint64_t>>::iterator it;
auto *timeline_semaphore_submit_info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(submit->pNext);
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
skip |=
ValidateStageMaskGsTsEnables(submit->pWaitDstStageMask[i], "vkQueueSubmit()",
"VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077",
"VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
VkSemaphore semaphore = submit->pWaitSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"VkQueueSubmit: %s is a timeline semaphore, but pBindInfo does not"
"include an instance of VkTimelineSemaphoreSubmitInfoKHR",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkSubmitInfo-pNext-03240",
"VkQueueSubmit: %s is a timeline semaphore, it contains an instance of"
"VkTimelineSemaphoreSubmitInfoKHR, but waitSemaphoreValueCount is different than "
"waitSemaphoreCount",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
if (!skip && pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
auto &values = timeline_values[semaphore];
if (values.empty()) {
values.insert(pSemaphore->payload);
}
values.insert(timeline_semaphore_submit_info->pWaitSemaphoreValues[i]);
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"VkQueueSubmit: %s is a timeline semaphore, but pBindInfo does not"
"include an instance of VkTimelineSemaphoreSubmitInfoKHR",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkSubmitInfo-pNext-03241",
"VkQueueSubmit: %s is a timeline semaphore, it contains an instance of"
"VkTimelineSemaphoreSubmitInfoKHR, but signalSemaphoreValueCount is different than "
"signalSemaphoreCount",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= pSemaphore->payload) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkSubmitInfo-pSignalSemaphores-03242",
"VkQueueSubmit: signal value in %s must be greater than current timeline semaphore %s value",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s that was previously signaled by %s but has not since "
"been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
if (!skip && pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
auto &values = timeline_values[semaphore];
if (values.empty()) {
values.insert(pSemaphore->payload);
}
values.insert(timeline_semaphore_submit_info->pSignalSemaphoreValues[i]);
}
}
return skip;
}
bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(VkQueue queue, VkSemaphore semaphore, uint64_t semaphoreTriggerValue,
unordered_map<VkSemaphore, std::set<uint64_t>> *timeline_values_arg,
const char *func_name, const char *vuid) const {
bool skip = false;
auto &timeline_values = *timeline_values_arg;
const auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
assert(semaphoreTriggerValue > 0);
// This set contains the current payload value, plus all the wait/signal
// values the semaphore can take, in order
auto &values = timeline_values[semaphore];
// Search for the previous value and check if the difference is bigger
// than allowed
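        // Example: with values = {5, 100, 150} and semaphoreTriggerValue == 150, the preceding value is 100, so the
        // difference checked against maxTimelineSemaphoreValueDifference is 50.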
auto it = values.find(semaphoreTriggerValue);
if (it == begin(values)) {
return false;
}
if (semaphoreTriggerValue - *(--it) > phys_dev_ext_props.timeline_semaphore_props.maxTimelineSemaphoreValueDifference) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), vuid,
"%s: %s contains timeline sempahore %s that sets its wait value with a margin "
"greater than maxTimelineSemaphoreValueDifference",
func_name, report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateCommandBuffersForSubmit(VkQueue queue, const VkSubmitInfo *submit,
ImageSubresPairLayoutMap *localImageLayoutMap_arg,
QueryMap *local_query_to_state_map,
vector<VkCommandBuffer> *current_cmds_arg) const {
bool skip = false;
auto queue_state = GetQueueState(queue);
ImageSubresPairLayoutMap &localImageLayoutMap = *localImageLayoutMap_arg;
vector<VkCommandBuffer> ¤t_cmds = *current_cmds_arg;
QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
EventToStageMap localEventToStageMap;
const auto perf_submit = lvl_find_in_chain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const auto *cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, &localImageLayoutMap);
current_cmds.push_back(submit->pCommandBuffers[i]);
skip |= ValidatePrimaryCommandBufferState(
cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= ValidateQueueFamilyIndices(cb_node, queue);
VkQueryPool first_query_pool = VK_NULL_HANDLE;
skip |= ValidatePerformanceQueries(cb_node, queue, first_query_pool, perf_submit ? perf_submit->counterPassIndex : 0);
for (auto descriptorSet : cb_node->validate_descriptorsets_in_queuesubmit) {
const cvdescriptorset::DescriptorSet *set_node = GetSetNode(descriptorSet.first);
if (set_node) {
for (auto pipe : descriptorSet.second) {
for (auto binding : pipe.second) {
std::string error;
std::vector<uint32_t> dynamicOffsets;
// dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
if (!ValidateDescriptorSetBindingData(cb_node, set_node, dynamicOffsets, binding.first, binding.second,
"vkQueueSubmit()", &error)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(descriptorSet.first), kVUID_Core_DrawState_DescriptorSetNotUpdated,
"%s bound the following validation error at %s time: %s",
report_data->FormatHandle(descriptorSet.first).c_str(), "vkQueueSubmit()", error.c_str());
}
}
}
}
}
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
for (auto &function : cb_node->queue_submit_functions) {
skip |= function(this, queue_state);
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(this, /*do_validate*/ true, &localEventToStageMap);
}
for (auto &function : cb_node->queryUpdates) {
skip |= function(this, /*do_validate*/ true, local_query_to_state_map);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) const {
const auto *pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence);
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
unordered_map<VkSemaphore, std::set<uint64_t>> timeline_values;
vector<VkCommandBuffer> current_cmds;
ImageSubresPairLayoutMap localImageLayoutMap;
QueryMap local_query_to_state_map;
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
skip |= ValidateSemaphoresForSubmit(queue, submit, &unsignaled_semaphores, &signaled_semaphores, &internal_semaphores,
&timeline_values);
skip |= ValidateCommandBuffersForSubmit(queue, submit, &localImageLayoutMap, &local_query_to_state_map, ¤t_cmds);
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupSubmitInfo>(submit->pNext);
if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i],
VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
"VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
auto *info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(submit->pNext);
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(queue, semaphore, info ? info->pWaitSemaphoreValues[i] : 0,
&timeline_values, "VkQueueSubmit",
"VUID-VkSubmitInfo-pWaitSemaphores-03243");
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(queue, semaphore, info ? info->pSignalSemaphoreValues[i] : 0,
&timeline_values, "VkQueueSubmit",
"VUID-VkSubmitInfo-pSignalSemaphores-03244");
}
}
return skip;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT }
};
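// Example lookup: ahb_format_map_a2v[(uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT] yields VK_FORMAT_D32_SFLOAT.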
// AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// ===================== ===================================================
// None VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT
// None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
{ VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT },
};
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
{ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
{ VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
AHardwareBuffer_Desc ahb_desc;
AHardwareBuffer_describe(buffer, &ahb_desc);
uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
if (0 == (ahb_desc.usage & required_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
"vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
ahb_desc.usage);
}
return skip;
}
void CoreChecks::PostCallRecordGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties,
VkResult result) {
if (VK_SUCCESS != result) return;
auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext);
if (ahb_format_props) {
ahb_ext_formats_set.insert(ahb_format_props->externalFormat);
}
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
// VkExportMemoryAllocateInfoKHR::handleTypes when memory was created.
if (!mem_info->is_export ||
(0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
"export handleTypes (0x%" PRIx32
") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
}
// If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
// with non-NULL image member, then that image must already be bound to memory.
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
const auto image_state = GetImageState(mem_info->dedicated_image);
if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(pInfo->memory)))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
"%s, but that image is not bound to the VkDeviceMemory object.",
report_data->FormatHandle(pInfo->memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str());
}
}
return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
bool skip = false;
auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
// This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
AHardwareBuffer_Desc ahb_desc = {};
AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
// If buffer is not NULL, it must be a valid Android hardware buffer object with AHardwareBuffer_Desc::format and
// AHardwareBuffer_Desc::usage compatible with Vulkan as described in Android Hardware Buffers.
//
// BLOB & GPU_DATA_BUFFER combo specifically allowed
if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
// Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
// Usage must have at least one bit from the table. It may have additional bits not in the table
uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
if ((0 == (ahb_desc.usage & ahb_equiv_usage_bits)) || (0 == ahb_format_map_a2v.count(ahb_desc.format))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
"vkAllocateMemory: The AHardwareBuffer_Desc's format ( %u ) and/or usage ( 0x%" PRIx64
" ) are not compatible with Vulkan.",
ahb_desc.format, ahb_desc.usage);
}
}
// Collect external buffer info
VkPhysicalDeviceExternalBufferInfo pdebi = {};
pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
}
VkExternalBufferProperties ext_buf_props = {};
ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;
DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
// Collect external format info
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
pdifi2.pNext = &pdeifi;
if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely
pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
}
if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
}
if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
}
VkExternalImageFormatProperties ext_img_fmt_props = {};
ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
VkImageFormatProperties2 ifp2 = {};
ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
ifp2.pNext = &ext_img_fmt_props;
VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);
// If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
// VkExternalImageFormatProperties or VkExternalBufferProperties.
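        // Importability through either the external-buffer path or the external-image path is sufficient, so only report an
        // error when neither query reports VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT.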
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
"vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
"structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
}
}
// Retrieve buffer and format properties of the provided AHardwareBuffer
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_format_props;
DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-02383",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, allocationSize (%" PRId64
") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
alloc_info->allocationSize, ahb_props.allocationSize);
}
// memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
// Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
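        // For example, memoryTypeIndex 3 is valid only if bit (1u << 3) = 0x8 is set in ahb_props.memoryTypeBits.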
uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, memoryTypeIndex (%" PRId32
") does not correspond to a bit set in AHardwareBuffer's reported "
"memoryTypeBits bitmask (0x%" PRIx32 ").",
alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
}
// Checks for allocations without a dedicated allocation requirement
if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
// the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
// AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
(0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-02384",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
"AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
ahb_desc.format, ahb_desc.usage);
}
} else { // Checks specific to import with a dedicated allocation requirement
const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
// The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT or
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-02386",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
"dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
") contains neither AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
ahb_desc.usage);
}
// the format of image must be VK_FORMAT_UNDEFINED or the format returned by
// vkGetAndroidHardwareBufferPropertiesANDROID
if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02387",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
}
            // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
(ici->arrayLayers != ahb_desc.layers)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02388",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
ahb_desc.layers);
}
// If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
// have either a full mipmap chain or exactly 1 mip level.
//
// NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
// its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
// that the Android hardware buffer contains only a single mip level."
//
// TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
// Clarification requested.
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
(ici->mipLevels != FullMipChainLevels(ici->extent))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02389",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
") is neither 1 nor full mip "
"chain levels (%" PRId32 ").",
ici->mipLevels, FullMipChainLevels(ici->extent));
}
// each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
// corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
// AHardwareBuffer_Desc::usage
if (ici->usage &
~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"dedicated image usage bits include one or more with no AHardwareBuffer equivalent.");
}
bool illegal_usage = false;
std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT};
for (VkImageUsageFlags ubit : usages) {
if (ici->usage & ubit) {
uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
if (0 == (ahb_usage & ahb_desc.usage)) illegal_usage = true;
}
}
if (illegal_usage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, one or more AHardwareBuffer usage bits equivalent to "
"the provided image's usage bits are missing from AHardwareBuffer_Desc.usage.");
}
}
} else { // Not an import
if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
(0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
(VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
// This is an Android HW Buffer export
if (0 != alloc_info->allocationSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
"but allocationSize is non-zero.");
}
} else {
if (0 == alloc_info->allocationSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
};
}
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkImageMemoryRequirementsInfo2-image-01897",
"vkGetImageMemoryRequirements2: Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
"bound to memory.");
}
return skip;
}
static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
const VkImageFormatProperties2 *pImageFormatProperties) {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) const {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is not VK_FORMAT_UNDEFINED while "
"there is a chained VkExternalFormatANDROID struct.");
}
} else if (VK_FORMAT_UNDEFINED == create_info->format) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is VK_FORMAT_UNDEFINED with no chained "
"VkExternalFormatANDROID struct.");
}
return false;
}
#else // !VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
const VkImageFormatProperties2 *pImageFormatProperties) {
return false;
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUIDUndefined, "Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
};
}
auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
// For the given obj node, if it is in use, flag a validation error and return the callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name,
const char *error_code) const {
if (disabled.object_in_use) return false;
bool skip = false;
if (obj_node->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
report_data->FormatHandle(obj_struct).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const {
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
bool skip = false;
if (mem_info) {
skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
return skip;
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
bool skip = false;
assert(mem_info);
const auto mem = mem_info->mem;
if (size == 0) {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem),
kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory range of size zero");
}
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mapped_range.size != 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"VkMapMemory: Attempting to map memory on an already-mapped %s.", report_data->FormatHandle(mem).c_str());
}
// Validate that offset + size is within object's allocationSize
if (size == VK_WHOLE_SIZE) {
if (offset >= mem_info->alloc_info.allocationSize) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
" with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
}
} else {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkMapMemory-size-00681",
"Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".", offset,
size + offset, mem_info->alloc_info.allocationSize);
}
}
return skip;
}
// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;
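// For non-coherent memory, the layer allocates a shadow copy that wraps the user-visible mapping with guard bands on both
// sides. The returned pointer is offset into the shadow allocation so that (ppData - offset) keeps the minMemoryMapAlignment
// guarantee, and the guard bytes are filled with NoncoherentMemoryFillValue so that over- or under-writes can later be detected.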
void CoreChecks::InitializeShadowMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void **ppData) {
auto mem_info = GetDevMemState(mem);
if (mem_info) {
uint32_t index = mem_info->alloc_info.memoryTypeIndex;
if (phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
mem_info->shadow_copy = 0;
} else {
if (size == VK_WHOLE_SIZE) {
size = mem_info->alloc_info.allocationSize - offset;
}
mem_info->shadow_pad_size = phys_dev_props.limits.minMemoryMapAlignment;
assert(SafeModulo(mem_info->shadow_pad_size, phys_dev_props.limits.minMemoryMapAlignment) == 0);
// Ensure start of mapped region reflects hardware alignment constraints
uint64_t map_alignment = phys_dev_props.limits.minMemoryMapAlignment;
// From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
uint64_t start_offset = offset % map_alignment;
// Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
mem_info->shadow_copy_base =
malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
mem_info->shadow_copy =
reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
~(map_alignment - 1)) +
start_offset;
assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
map_alignment) == 0);
memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
*ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
}
}
}
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) const {
// Verify fence status of submitted fences
bool skip = false;
for (uint32_t i = 0; i < fenceCount; i++) {
skip |= VerifyQueueStateToFence(pFences[i]);
}
return skip;
}
bool CoreChecks::ValidateGetDeviceQueue(uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue, const char *valid_qfi_vuid,
const char *qfi_in_range_vuid) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid);
const auto &queue_data = queue_family_index_map.find(queueFamilyIndex);
if (queue_data != queue_family_index_map.end() && queue_data->second <= queueIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
qfi_in_range_vuid,
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, queue_data->second);
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) const {
return ValidateGetDeviceQueue(queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384",
"VUID-vkGetDeviceQueue-queueIndex-00385");
}
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const {
const QUEUE_STATE *queue_state = GetQueueState(queue);
return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const {
bool skip = false;
const auto &const_queue_map = queueMap;
for (auto &queue : const_queue_map) {
skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size());
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
bool skip = false;
auto *sem_type_create_info = lvl_find_in_chain<VkSemaphoreTypeCreateInfoKHR>(pCreateInfo->pNext);
if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
!enabled_features.timeline_semaphore_features.timelineSemaphore) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 0,
"VUID-VkSemaphoreTypeCreateInfoKHR-timelineSemaphore-03252",
"VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores");
}
if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY_KHR &&
sem_type_create_info->initialValue != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 0,
"VUID-VkSemaphoreTypeCreateInfoKHR-semaphoreType-03279",
"vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY_KHR, initialValue must be zero");
}
return skip;
}
bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfoKHR *pWaitInfo,
uint64_t timeout) const {
bool skip = false;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
auto *pSemaphore = GetSemaphoreState(pWaitInfo->pSemaphores[i]);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(pWaitInfo->pSemaphores[i]), "VUID-VkSemaphoreWaitInfoKHR-pSemaphores-03256",
"VkWaitSemaphoresKHR: all semaphores in pWaitInfo must be timeline semaphores, but %s is not",
report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node) {
if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
"VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) const {
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const {
const EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
bool skip = false;
if (event_state) {
skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) const {
if (disabled.query_validation) return false;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
bool skip = false;
if (qp_state) {
skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
return skip;
}
bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state,
uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const {
bool skip = false;
if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) {
string invalid_flags_string;
for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) {
if (flag & flags) {
if (invalid_flags_string.size()) {
invalid_flags_string += " and ";
}
invalid_flags_string += string_VkQueryResultFlagBits(flag);
}
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230"
: "VUID-vkCmdCopyQueryPoolResults-queryType-03233",
"%s: QueryPool %s was created with a queryType of"
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), invalid_flags_string.c_str());
}
QueryObject query_obj{query_pool_state->pool, 0u};
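    // A performance query only has complete results once it has been submitted once for each of the pool's
    // n_performance_passes counter passes, so count how many passes have reached QUERYSTATE_AVAILABLE for each query.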
    for (uint32_t queryIndex = firstQuery; queryIndex < (firstQuery + queryCount); queryIndex++) {
query_obj.query = queryIndex;
uint32_t submitted = 0;
for (uint32_t passIndex = 0; passIndex < query_pool_state->n_performance_passes; passIndex++) {
auto query_pass_iter = queryPassToStateMap.find(QueryObjectPass(query_obj, passIndex));
if (query_pass_iter != queryPassToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++;
}
if (submitted < query_pool_state->n_performance_passes) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-vkGetQueryPoolResults-queryType-03231",
"%s: QueryPool %s has %u performance query passes, but the query has only been "
"submitted for %u of the passes.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(),
query_pool_state->n_performance_passes, submitted);
}
}
return skip;
}
bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip;
if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 ||
(stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-vkGetQueryPoolResults-queryType-03229",
"QueryPool %s was created with a queryType of "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the "
"size of VkPerformanceCounterResultKHR.",
report_data->FormatHandle(queryPool).c_str());
}
skip |= ValidatePerformanceQueryResults("vkGetQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
return skip;
}
bool CoreChecks::ValidateGetQueryPoolResultsFlags(VkQueryPool queryPool, VkQueryResultFlags flags) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, HandleToUint64(queryPool),
"VUID-vkGetQueryPoolResults-queryType-00818",
"%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateGetQueryPoolResultsQueries(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const {
bool skip = false;
QueryObject query_obj{queryPool, 0u};
for (uint32_t i = 0; i < queryCount; ++i) {
query_obj.query = firstQuery + i;
if (queryToStateMap.count(query_obj) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
HandleToUint64(queryPool), kVUID_Core_DrawState_InvalidQuery,
"vkGetQueryPoolResults() on %s and query %" PRIu32 ": unknown query",
report_data->FormatHandle(queryPool).c_str(), query_obj.query);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
VkQueryResultFlags flags) const {
if (disabled.query_validation) return false;
bool skip = false;
skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-00814", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateGetQueryPoolResultsFlags(queryPool, flags);
skip |= ValidateGetQueryPoolResultsQueries(queryPool, firstQuery, queryCount);
skip |= ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags);
return skip;
}
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, const VkMemoryRequirements &memRequirements, bool is_linear,
const char *api_name) const {
bool skip = false;
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
} else if (typed_handle.type == kVulkanObjectTypeImage) {
error_code = "VUID-vkBindImageMemory-memoryOffset-01046";
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02451";
} else {
// Unsupported object type
assert(false);
}
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), error_code,
"In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
memoryOffset, mem_info->alloc_info.allocationSize);
}
return skip;
}
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs, bool is_linear, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, mem_reqs, is_linear,
api_name);
}
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, mem_reqs, true,
api_name);
}
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const VkMemoryRequirements &mem_reqs,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset,
mem_reqs, true, api_name);
}
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
const char *msgCode) const {
bool skip = false;
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of %s.",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
report_data->FormatHandle(mem_info->mem).c_str());
}
return skip;
}
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
const char *api_name) const {
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
// Track objects tied to memory
uint64_t buffer_handle = HandleToUint64(buffer);
const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
skip = ValidateSetMemBinding(mem, obj_struct, api_name);
// Validate bound memory range information
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindBufferMemory-memory-01035");
}
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
"VUID-vkBindBufferMemory-memoryOffset-01036",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
"VUID-vkBindBufferMemory-size-01037",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
// TODO: Add vkBindBufferMemory2KHR error message when added to spec.
auto validation_error = kVUIDUndefined;
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
validation_error = "VUID-vkBindBufferMemory-memory-01508";
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
validation_error,
"%s: for dedicated %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(mem).c_str(),
report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
report_data->FormatHandle(buffer).c_str(), memoryOffset);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirements2ANDROID(pInfo->image);
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo);
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) const {
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
bool skip = false;
if (pipeline_state) {
skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
bool skip = false;
if (sampler_state) {
skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) const {
const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
bool skip = false;
if (desc_pool_state) {
skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
// If this is a secondary command buffer, then make sure its primary is also in-flight
// If primary is not in-flight, then remove secondary from global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code, "Attempt to %s %s which is in use.", action,
report_data->FormatHandle(cb_node->commandBuffer).c_str());
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
}
return skip;
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) const {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
const auto *cb_node = GetCBState(pCommandBuffers[i]);
// Delete CB information structure, and remove from commandBufferMap
if (cb_node) {
skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
return ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
if (disabled.query_validation) return false;
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!enabled_features.core.pipelineStatisticsQuery) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolCreateInfo-queryType-00791",
"Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
"Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
"VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
}
auto perf_ci = lvl_find_in_chain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
if (!perf_ci) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolCreateInfo-queryType-03222",
"Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
"pCreateInfo does not contain in instance of VkQueryPoolPerformanceCreateInfoKHR.");
} else {
const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
if (perf_counter_iter == physical_device_state->perf_counters.end()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
"VkQueryPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
} else {
const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
"VkQueryPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
"counter index.",
idx, perf_ci->pCounterIndices[idx]);
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) const {
const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
const auto *command_pool_state = GetCommandPoolState(commandPool);
return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
const auto pFence = GetFenceState(pFences[i]);
if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "%s is in use.",
report_data->FormatHandle(pFences[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) const {
const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
bool skip = false;
if (framebuffer_state) {
skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) const {
const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
bool skip = false;
if (rp_state) {
skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
VkFormatProperties format_properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
return format_properties;
}
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec,
const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
for (uint32_t i = 0; i < count; i++) {
auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
if (nullptr == pvids_ci) continue;
const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
if (vibdd->binding >= device_limits->maxVertexInputBindings) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
i, j, vibdd->binding, device_limits->maxVertexInputBindings);
}
if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
}
if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
"enabled.",
i, j);
}
if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
"enabled.",
i, j, vibdd->divisor);
}
// Find the corresponding binding description and validate input rate setting
bool failed_01871 = true;
for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
(VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
failed_01871 = false;
break;
}
}
if (failed_01871) { // Description not found, or has incorrect inputRate value
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
"VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
i, j, vibdd->binding);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state_data);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
}
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
}
if (device_extensions.vk_ext_vertex_attribute_divisor) {
skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state_data);
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
for (uint32_t i = 0; i < count; i++) {
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipeline(ccpl_state->pipe_state.back().get());
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidateRayTracingPipelineNV(crtpl_state->pipe_state[i].get());
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo,
uint32_t *pExecutableCount,
VkPipelineExecutablePropertiesKHR *pProperties) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270",
"vkGetPipelineExecutablePropertiesKHR called when pipelineExecutableInfo feature is not enabled.");
}
return skip;
}
bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272",
"vkGetPipelineExecutableStatisticsKHR called when pipelineExecutableInfo feature is not enabled.");
}
VkPipelineInfoKHR pi = {};
pi.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
pi.pipeline = pExecutableInfo->pipeline;
// We could probably cache this instead of fetching it every time
uint32_t executableCount = 0;
DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executableCount, NULL);
if (pExecutableInfo->executableIndex >= executableCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pExecutableInfo->pipeline), "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275",
"VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with "
"the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR",
pExecutableInfo->executableIndex, executableCount);
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device,
const VkPipelineExecutableInfoKHR *pExecutableInfo,
uint32_t *pStatisticCount,
VkPipelineExecutableStatisticKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
if (!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pExecutableInfo->pipeline), "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274",
"vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR(
VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
if (!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pExecutableInfo->pipeline),
"VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278",
"vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) const {
return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
report_data, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.max_push_descriptors,
IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.descriptor_indexing,
&enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props, &device_extensions);
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name,
uint32_t index = 0) const {
if (disabled.push_constant_range) return false;
uint32_t const maxPushConstantsSize = phys_dev_props.limits.maxPushConstantsSize;
bool skip = false;
// Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
// This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00294",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00298",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00370",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00371",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// size needs to be non-zero and a multiple of 4.
if ((size == 0) || ((size & 0x3) != 0)) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (size == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00296",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00297",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (size == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-arraylength",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00369",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// offset needs to be a multiple of 4.
if ((offset & 0x3) != 0) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00295",
"%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
index, offset);
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00368",
"%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
return skip;
}
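// Descriptor type groupings used when summing descriptor counts against the per-stage and per-set device limits.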
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind &&
(dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
// count one block per binding. descriptorCount is number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
}
return sum_by_type;
}
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout) const {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size,
"vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
}
if (push_descriptor_set_count > 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02214",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
    // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
if (device_extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
        // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) const {
// Make sure sets being destroyed are not currently in-use
if (disabled.idle_descriptor_set) return false;
bool skip = false;
const DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
if (pPool != nullptr) {
for (auto ds : pPool->sets) {
if (ds && ds->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state_data with the total number of descriptors of each type required,
// as well as the DescriptorSetLayout ptrs used for the later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
// All state checks for AllocateDescriptorSets is done in single function
return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) const {
    bool skip = false;
    // Make sure that no sets being destroyed are currently in-use
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
}
}
const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(descriptorPool), "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) const {
    // UpdateDescriptorSets operates on a number of descriptor sets at once, so a single up-front map look-up is not
    // possible; the look-ups are performed individually in the functions below.
    // Only validate state here; no state updates are performed in this function. A helper function parses the
    // parameters and makes calls into the specific class instances.
return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
"vkUpdateDescriptorSets()");
}
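// Validate vkBeginCommandBuffer: the command buffer must not be in use or already recording, secondary command buffers
// need valid inheritance info, and an implicit reset requires a pool created with the RESET_COMMAND_BUFFER bit.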
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if (cb_state->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
"command buffer fence before this call.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (!pInfo) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
report_data->FormatHandle(commandBuffer).c_str());
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(pInfo->renderPass);
const auto *framebuffer = GetFramebufferState(pInfo->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
const auto *render_pass = GetRenderPassState(pInfo->renderPass);
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
render_pass, "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
}
}
if ((pInfo->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
"occulusionQuery is disabled or the device does not support precise occlusion queries.",
report_data->FormatHandle(commandBuffer).c_str());
}
}
if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
const auto *renderPass = GetRenderPassState(pInfo->renderPass);
if (renderPass) {
if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
"less than the number of subpasses (%d).",
report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass,
renderPass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_state->state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
"vkEndCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
} else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
VkCommandPool cmdPool = cb_state->createInfo.commandPool;
const auto *pPool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
"%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
}
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(
chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
skip |=
ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
}
return skip;
}
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
!(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// This needs spec clarification to update valid usage, see comments in PR:
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
for (auto query : cb_state->activeQueries) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061",
"Ending command buffer with in progress query: %s, query %d.",
report_data->FormatHandle(query.pool).c_str(), query.query);
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const {
bool skip = false;
const CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
if (!pCB) return false;
VkCommandPool cmdPool = pCB->createInfo.commandPool;
const auto *pPool = pCB->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046",
"Attempt to reset %s created from %s that does NOT have the "
"VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
skip |= CheckCommandBufferInFlight(pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
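// Map a pipeline bind point to a human-readable name for error messages.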
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS:
return "graphics";
case VK_PIPELINE_BIND_POINT_COMPUTE:
return "compute";
case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV:
return "ray-tracing";
default:
return "unknown";
}
}
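// Validate vkCmdBindPipeline: the command pool's queue family must support the bind point and the pipeline's type must
// match the bind point it is bound to.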
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
const auto *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
const auto &pipeline_state_bind_point = pipeline_state->getPipelineType();
if (pipelineBindPoint != pipeline_state_bind_point) {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
"Cannot bind a pipeline of type %s to the graphics pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
"Cannot bind a pipeline of type %s to the compute pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
"Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
"vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
"vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032",
"vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag.");
}
if (!enabled_features.exclusive_scissor.exclusiveScissor) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031",
"vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
}
return skip;
}
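// Validate vkCmdBindShadingRateImageNV: the shadingRateImage feature must be enabled and, if an image view is bound, it
// must be a 2D or 2D-array R8_UINT view of an image created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV in the
// expected layout.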
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058",
"vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
}
if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        // Only dereference view_state after the null check; GetImageViewState returns null for an unknown handle.
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
"VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView),
"VUID-vkCmdBindShadingRateImageNV-imageView-02060",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
}
const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
"created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
}
if (view_state) {
const auto image_state = GetImageState(view_state->create_info.image);
bool hit_error = false;
// XXX TODO: While the VUID says "each subresource", only the base mip level is
// actually used. Since we don't have an existing convenience function to iterate
// over all mip levels, just don't bother with non-base levels.
const VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
if (image_state) {
skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV,
"vkCmdCopyImage()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
"VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
}
}
}
return skip;
}
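// Validate vkCmdSetViewportShadingRatePaletteNV: the feature must be enabled, the palette state must be dynamic, and
// each palette's entry count must be within the device's shadingRatePaletteSize limit.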
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
"vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
}
if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065",
"vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without "
"VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag.");
}
for (uint32_t i = 0; i < viewportCount; ++i) {
auto *palette = &pShadingRatePalettes[i];
if (palette->shadingRatePaletteEntryCount == 0 ||
palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
"vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
}
}
return skip;
}
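// Verify that the vertex, index, and transform offsets in a VkGeometryTrianglesNV lie within their respective buffers.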
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, VkDebugReportObjectTypeEXT object_type,
uint64_t object_handle, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->binding.size <= triangles.vertexOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->binding.size <= triangles.indexOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->binding.size <= triangles.transformOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
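// Verify that the AABB data offset in a VkGeometryAABBNV lies within its buffer.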
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, VkDebugReportObjectTypeEXT object_type,
uint64_t object_handle, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->binding.size > 0 && aabb_state->binding.size <= aabbs.offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, VkDebugReportObjectTypeEXT object_type, uint64_t object_handle,
const char *func_name) const {
bool skip = false;
if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) {
skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, object_type, object_handle, func_name);
} else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) {
skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, object_type, object_handle, func_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) {
skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "vkCreateAccelerationStructureNV():");
}
}
return skip;
}
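// Validate a single VkBindAccelerationStructureMemoryInfoNV: the acceleration structure must not already be bound,
// and the supplied memory must satisfy the reported requirements (range, memory type, alignment, and size)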
bool CoreChecks::ValidateBindAccelerationStructureMemoryNV(VkDevice device,
const VkBindAccelerationStructureMemoryInfoNV &info) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(info.accelerationStructure);
if (!as_state) {
return skip;
}
uint64_t as_handle = HandleToUint64(info.accelerationStructure);
if (!as_state->GetBoundMemory().empty()) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
as_handle, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-02450",
"vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
as_state->memory_requirements.memoryRequirements,
"vkBindAccelerationStructureMemoryNV()");
skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
"vkBindAccelerationStructureMemoryNV()",
"VUID-VkBindAccelerationStructureMemoryInfoNV-memory-02593");
}
// Validate memory requirements alignment
if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
as_handle, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02594",
"vkBindAccelerationStructureMemoryNV(): memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
                            as_handle, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-02595",
"vkBindAccelerationStructureMemoryNV(): memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
mem_info->alloc_info.allocationSize - info.memoryOffset,
as_state->memory_requirements.memoryRequirements.size);
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
skip |= ValidateBindAccelerationStructureMemoryNV(device, pBindInfos[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
size_t dataSize, void *pData) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
if (as_state != nullptr) {
// TODO: update the fake VUID below once the real one is generated.
skip = ValidateMemoryIsBoundToAccelerationStructure(
as_state, "vkGetAccelerationStructureHandleNV",
"UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
return skip;
}
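// vkCmdBuildAccelerationStructureNV(): validate geometry limits, compatibility of the build info with the
// acceleration structure's create info, memory binding of dst, and scratch buffer sizing for build and update paths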
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
VkDeviceSize instanceOffset, VkBool32 update,
VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
VkBuffer scratch, VkDeviceSize scratchOffset) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()");
if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
skip |= ValidateGeometryNV(pInfo->pGeometries[i], VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(device), "vkCmdBuildAccelerationStructureNV():");
}
}
if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_props.maxGeometryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
"vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
pInfo->geometryCount);
}
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);
if (dst_as_state != nullptr && pInfo != nullptr) {
if (dst_as_state->create_info.info.type != pInfo->type) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
"[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
string_VkAccelerationStructureTypeNV(dst_as_state->create_info.info.type),
string_VkAccelerationStructureTypeNV(pInfo->type));
}
if (dst_as_state->create_info.info.flags != pInfo->flags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
"[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
dst_as_state->create_info.info.flags, pInfo->flags);
}
if (dst_as_state->create_info.info.instanceCount < pInfo->instanceCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
dst_as_state->create_info.info.instanceCount, pInfo->instanceCount);
}
if (dst_as_state->create_info.info.geometryCount < pInfo->geometryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
dst_as_state->create_info.info.geometryCount, pInfo->geometryCount);
} else {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
const VkGeometryDataNV &create_geometry_data = dst_as_state->create_info.info.pGeometries[i].geometry;
const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
break;
}
if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
break;
}
if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
break;
}
}
}
}
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (update == VK_TRUE) {
if (src == VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
"with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
"VkAccelerationStructureInfoNV::flags.");
}
}
if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(dst), kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
"has not been called for update scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->binding.size - scratchOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
} else {
if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(dst), kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
"vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->binding.size - scratchOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
"vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
}
return skip;
}
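// vkCmdCopyAccelerationStructureNV(): dst must be backed by memory, and compact-mode copies require src to have been
// built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV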
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()");
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
if (src_as_state != nullptr &&
(!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdCopyAccelerationStructureNV-src-02497",
"vkCmdCopyAccelerationStructureNV(): src must have been built with "
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
"VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV",
"VUID-vkDestroyAccelerationStructureNV-accelerationStructure-02442");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkViewportWScalingNV *pViewportWScalings) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWScalingNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportWScalingNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()");
if (cb_state->static_status & CBSTATUS_VIEWPORT_W_SCALING_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportWScalingNV-None-01322",
"vkCmdSetViewportWScalingNV(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV flag.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
"vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
uint16_t lineStipplePattern) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineStippleEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineStippleEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()");
if (cb_state->static_status & CBSTATUS_LINE_STIPPLE_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetLineStippleEXT-None-02775",
"vkCmdSetLineStippleEXT called but pipeline was created without VK_DYNAMIC_STATE_LINE_STIPPLE_EXT flag.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
"vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
}
if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
"vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
"vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
"vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
"vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
"vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
}
return skip;
}
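// For bindings of the given dynamic descriptor type, check that each consumed entry of pDynamicOffsets is a multiple
// of the corresponding device alignment limit; offset_idx is advanced past the binding's descriptors when the type matches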
static bool ValidateDynamicOffsetAlignment(const debug_report_data *report_data, const VkDescriptorSetLayoutBinding *binding,
VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets,
const char *err_msg, const char *limit_name, uint32_t *offset_idx) {
bool skip = false;
if (binding->descriptorType == test_type) {
const auto end_idx = *offset_idx + binding->descriptorCount;
for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) {
if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, err_msg,
"vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64
".",
current_idx, pDynamicOffsets[current_idx], limit_name, alignment);
}
}
*offset_idx = end_idx;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
const auto *pipeline_layout = GetPipelineLayout(layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
if (descriptor_set) {
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
"%s due to: %s.",
set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
// Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u dynamicOffsets are left in "
"pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
// testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount;
} else { // Validate dynamic offsets and Dynamic Offset Minimums
uint32_t cur_dyn_offset = total_dynamic_descriptors;
const auto dsl = descriptor_set->GetLayout();
const auto binding_count = dsl->GetBindingCount();
const auto &limits = phys_dev_props.limits;
for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"minUniformBufferOffsetAlignment", &cur_dyn_offset);
skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"minStorageBufferOffsetAlignment", &cur_dyn_offset);
}
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
"Attempt to bind %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
return skip;
}
// Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
bool skip = false;
auto pool = cb_state->command_pool.get();
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_u64,
error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *func_name = "vkCmdPushDescriptorSetKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
const auto layout_data = GetPipelineLayout(layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
const auto layout_u64 = HandleToUint64(layout);
if (set < set_layouts.size()) {
const auto dsl = set_layouts[set];
if (dsl) {
if (!dsl->IsPushDescriptor()) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
set, report_data->FormatHandle(layout).c_str());
} else {
// Create an empty proxy in order to use the existing descriptor set update validation
// TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
// don't have to do this.
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, nullptr, report_data);
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
}
}
} else {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64,
"VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
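// Return the required offset alignment, in bytes, for the given index type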
static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
switch (indexType) {
case VK_INDEX_TYPE_UINT16:
return 2;
case VK_INDEX_TYPE_UINT32:
return 4;
case VK_INDEX_TYPE_UINT8_EXT:
return 1;
default:
// Not a real index type. Express no alignment requirement here; we expect upper layer
// to have already picked up on the enum being nonsense.
return 1;
}
}
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) const {
const auto buffer_state = GetBufferState(buffer);
const auto cb_node = GetCBState(commandBuffer);
assert(buffer_state);
assert(cb_node);
bool skip =
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
"vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
const auto offset_align = GetIndexAlignment(indexType);
if (offset % offset_align) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
assert(buffer_state);
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |=
ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
return skip;
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
const std::string &msgCode) const {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), msgCode, "%s for %s was created with a sample count of %s but must be %s.",
location, report_data->FormatHandle(image_state->image).c_str(),
string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
const auto dst_buffer_state = GetBufferState(dstBuffer);
assert(dst_buffer_state);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034",
"vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdSetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150",
"VUID-vkCmdSetEvent-stageMask-01151", "VUID-vkCmdSetEvent-stageMask-02107",
"VUID-vkCmdSetEvent-stageMask-02108");
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154",
"VUID-vkCmdResetEvent-stageMask-01155", "VUID-vkCmdResetEvent-stageMask-02109",
"VUID-vkCmdResetEvent-stageMask-02110");
return skip;
}
// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) {
if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags;
return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) |
(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
(extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
(extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) |
(extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) |
(extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) |
(extensions.vk_ext_fragment_density_map ? VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0));
}
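// True if the mask contains any stage outside the framebuffer-space stages
// (fragment shader, early/late fragment tests, color attachment output)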
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}
static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) {
// Note that the list (and lookup) ignore invalid-for-enabled-extension condition. This should be checked elsewhere
// and would greatly complicate this intentionally simple implementation
// clang-format off
const VkPipelineStageFlagBits ordered_array[] = {
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// Including the task/mesh shaders here is not technically correct, as they are in a
// separate logical pipeline - but it works for the case this is currently used, and
// fixing it would require significant rework and end up with the code being far more
// verbose for no practical gain.
// However, worth paying attention to this if using this function in a new way.
VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
};
// clang-format on
const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits);
for (int i = 0; i < ordered_array_length; ++i) {
if (ordered_array[i] == flag) {
return i;
}
}
return -1;
}
// The following two functions technically have O(N^2) complexity, but it's for a value of O that's largely
// stable and also rather tiny - this could definitely be rejigged to work more efficiently, but the impact
// on runtime is currently negligible, so it wouldn't gain very much.
// If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit);
    // Scan every bit position of the mask; inflags is shifted right each iteration, so
    // (inflags & 0x1u) << i reconstructs the flag bit at its original position.
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
if (current_flag) {
int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
if (new_order != -1 && new_order < earliest_bit_order) {
earliest_bit_order = new_order;
earliest_bit = current_flag;
}
}
inflags = inflags >> 1;
}
return earliest_bit;
}
static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit);
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order > latest_bit_order) {
                latest_bit_order = new_order;
                latest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
}
return latest_bit;
}
// Verify image barrier image state and that the image is consistent with FB image
bool CoreChecks::ValidateImageBarrierAttachment(const char *funcName, CMD_BUFFER_STATE const *cb_state, VkFramebuffer framebuffer,
uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
const VulkanTypedHandle &rp_handle, uint32_t img_index,
const VkImageMemoryBarrier &img_barrier) const {
bool skip = false;
const auto &fb_state = GetFramebufferState(framebuffer);
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
bool sub_image_found = false; // Do we find a corresponding subpass description
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
// Verify that a framebuffer image matches barrier image
const auto attachmentCount = fb_state->createInfo.attachmentCount;
for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
auto view_state = GetAttachmentImageViewState(fb_state, attachment);
if (view_state && (img_bar_image == view_state->create_info.image)) {
image_match = true;
attach_index = attachment;
break;
}
}
if (image_match) { // Make sure subpass is referring to matching attachment
if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
sub_image_found = true;
}
if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(sub_desc.pNext);
if (resolve && resolve->pDepthStencilResolveAttachment &&
resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
sub_image_found = true;
}
}
if (!sub_image_found) {
for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pColorAttachments[j].layout;
sub_image_found = true;
break;
}
if (!sub_image_found && sub_desc.pResolveAttachments &&
sub_desc.pResolveAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pResolveAttachments[j].layout;
sub_image_found = true;
break;
}
}
}
if (!sub_image_found) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-image-02635",
"%s: Barrier pImageMemoryBarriers[%d].%s is not referenced by the VkSubpassDescription for "
"active subpass (%d) of current %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str());
}
} else { // !image_match
auto const fb_handle = HandleToUint64(fb_state->framebuffer);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle,
"VUID-vkCmdPipelineBarrier-image-02635",
"%s: Barrier pImageMemoryBarriers[%d].%s does not match an image from the current %s.", funcName, img_index,
report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_state->framebuffer).c_str());
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181",
"%s: As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
"equal newLayout yet they are %s and %s.",
funcName, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-oldLayout-02636",
"%s: Barrier pImageMemoryBarriers[%d].%s is referenced by the VkSubpassDescription for active "
"subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
string_VkImageLayout(img_barrier.oldLayout));
}
}
return skip;
}
// Validate image barriers within a renderPass
bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, uint32_t active_subpass,
const safe_VkSubpassDescription2KHR &sub_desc, const VulkanTypedHandle &rp_handle,
const safe_VkSubpassDependency2KHR *dependencies,
const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
const auto &img_src_access_mask = img_barrier.srcAccessMask;
const auto &img_dst_access_mask = img_barrier.dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
(img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
"pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(funcName, cb_state, cb_state->activeFramebuffer, active_subpass, sub_desc,
rp_handle, i, img_barrier);
}
}
return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers,
uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
const auto rp_state = cb_state->activeRenderPass;
const auto active_subpass = cb_state->activeSubpass;
const VulkanTypedHandle rp_handle(rp_state->renderPass, kVulkanObjectTypeRenderPass);
const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
const auto &dependencies = rp_state->createInfo.pDependencies;
if (self_dependencies.size() == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", funcName,
active_subpass, report_data->FormatHandle(rp_handle).c_str());
} else {
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
// Look for matching mask in any self-dependency
bool stage_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.srcStageMask);
const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.dstStageMask);
stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (stage_mask_match) break;
}
if (!stage_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
"self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
"self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
if (0 != buffer_mem_barrier_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", funcName,
buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_handle).c_str());
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
bool access_mask_match = false;
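            // The barrier's access masks must each be a subset of some self-dependency's srcAccessMask/dstAccessMask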
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
(mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
"for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
"for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
}
skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies,
self_dependencies, image_mem_barrier_count, image_barriers);
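        // dependencyFlags must exactly match the dependencyFlags of at least one self-dependency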
bool flag_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
flag_match = sub_dep.dependencyFlags == dependency_flags;
if (flag_match) break;
}
if (!flag_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dependency_flags, cb_state->activeSubpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
}
return skip;
}
// Array mapping each accessMask bit to the pipeline stageMask(s) that support it
//  accessMask active bit position (0-27) maps to the array index
const static VkPipelineStageFlags AccessMaskToPipeStage[28] = {
// VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_INDEX_READ_BIT = 1
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_UNIFORM_READ_BIT = 3
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// VK_ACCESS_SHADER_READ_BIT = 5
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_SHADER_WRITE_BIT = 6
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_TRANSFER_READ_BIT = 11
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_TRANSFER_WRITE_BIT = 12
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_HOST_READ_BIT = 13
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_HOST_WRITE_BIT = 14
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_MEMORY_READ_BIT = 15
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_MEMORY_WRITE_BIT = 16
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
// VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
// VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
};
// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask,
VkPipelineStageFlags stage_mask) {
// Early out if all commands set, or access_mask NULL
if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
stage_mask = ExpandPipelineStageFlags(extensions, stage_mask);
int index = 0;
// for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
while (access_mask) {
index = (u_ffs(access_mask) - 1);
assert(index >= 0);
// Must have "!= 0" compare to prevent warning from MSVC
if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out
access_mask &= ~(1 << index); // Mask off bit that's been checked
}
return true;
}
namespace barrier_queue_families {
enum VuIndex {
kSrcOrDstMustBeIgnore,
kSpecialOrIgnoreOnly,
kSrcIgnoreRequiresDstIgnore,
kDstValidOrSpecialIfNotIgnore,
kSrcValidOrSpecialIfNotIgnore,
kSrcAndDestMustBeIgnore,
kBothIgnoreOrBothValid,
kSubmitQueueMustMatchSrcOrDst
};
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
"Source or destination queue family must be special or ignored.",
"Destination queue family must be ignored if source queue family is.",
"Destination queue family must be valid, ignored, or special.",
"Source queue family must be valid, ignored, or special.",
"Source and destination queue family must both be ignored.",
"Source and destination queue family must both be ignore or both valid.",
"Source or destination queue family must match submit queue family, if not ignored."};
static const std::string image_error_codes[] = {
"VUID-VkImageMemoryBarrier-image-01381", // kSrcOrDstMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01766", // kSpecialOrIgnoreOnly
"VUID-VkImageMemoryBarrier-image-01201", // kSrcIgnoreRequiresDstIgnore
"VUID-VkImageMemoryBarrier-image-01768", // kDstValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01767", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01199", // kSrcAndDestMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01200", // kBothIgnoreOrBothValid
"VUID-VkImageMemoryBarrier-image-01205", // kSubmitQueueMustMatchSrcOrDst
};
static const std::string buffer_error_codes[] = {
"VUID-VkBufferMemoryBarrier-buffer-01191", // kSrcOrDstMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01763", // kSpecialOrIgnoreOnly
"VUID-VkBufferMemoryBarrier-buffer-01193", // kSrcIgnoreRequiresDstIgnore
"VUID-VkBufferMemoryBarrier-buffer-01765", // kDstValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01764", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01190", // kSrcAndDestMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01192", // kBothIgnoreOrBothValid
"VUID-VkBufferMemoryBarrier-buffer-01196", // kSubmitQueueMustMatchSrcOrDst
};
class ValidatorState {
public:
ValidatorState(const ValidationStateTracker *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode)
: report_data_(device_data->report_data),
func_name_(func_name),
cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
barrier_handle_(barrier_handle),
sharing_mode_(sharing_mode),
val_codes_(barrier_handle.type == kVulkanObjectTypeImage ? image_error_codes : buffer_error_codes),
limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
mem_ext_(IsExtEnabled(device_data->device_extensions.vk_khr_external_memory)) {}
    // Log the messages using boilerplate from the object state and VU-specific information from vu_index.
    // One- and two-family versions; in the single-family version, param_name identifies the queue family parameter at fault.
bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
const std::string &val_code = val_codes_[vu_index];
const char *annotation = GetFamilyAnnotation(family);
return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
val_code, "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. %s", func_name_,
GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), param_name, family,
annotation, vu_summary[vu_index]);
}
bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string &val_code = val_codes_[vu_index];
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return log_msg(
report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code,
"%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, vu_summary[vu_index]);
}
// This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceeds the guarantees of validity for
// application input.
static bool ValidateAtQueueSubmit(const QUEUE_STATE *queue_state, const ValidationStateTracker *device_data,
uint32_t src_family, uint32_t dst_family, const ValidatorState &val) {
uint32_t queue_family = queue_state->queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue_state->queue), val_code,
"%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
"srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
"vkQueueSubmit", queue_family, val.GetTypeString(),
device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && QueueFamilyIsSpecial(queue_family));
}
// Helpers for LogMsg (and log_msg)
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL_KHR:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
};
}
const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const debug_report_data *const report_data_;
const char *const func_name_;
const uint64_t cb_handle64_;
const VulkanTypedHandle barrier_handle_;
const VkSharingMode sharing_mode_;
const std::string *val_codes_;
const uint32_t limit_;
const bool mem_ext_;
};
bool Validate(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = QueueFamilyIsIgnored(src_queue_family);
const bool dst_ignored = QueueFamilyIsIgnored(dst_queue_family);
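    // The allowed combinations differ when VK_KHR_external_memory is enabled: the special queue families
    // (EXTERNAL/FOREIGN) become acceptable, and CONCURRENT sharing only requires one of src/dst to be IGNORED.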
if (val.KhrExternalMem()) {
if (mode_concurrent) {
if (!(src_ignored || dst_ignored)) {
skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || QueueFamilyIsSpecial(dst_queue_family))) ||
(dst_ignored && !(src_ignored || QueueFamilyIsSpecial(src_queue_family)))) {
skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_ignored && !dst_ignored) {
skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
}
if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
}
if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
}
}
} else {
// No memory extension
if (mode_concurrent) {
if (!src_ignored || !dst_ignored) {
skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
}
}
}
return skip;
}
} // namespace barrier_queue_families
bool CoreChecks::ValidateConcurrentBarrierAtSubmit(const ValidationStateTracker *state_data, const QUEUE_STATE *queue_state,
const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &typed_handle, uint32_t src_queue_family,
uint32_t dst_queue_family) {
using barrier_queue_families::ValidatorState;
ValidatorState val(state_data, func_name, cb_state, typed_handle, VK_SHARING_MODE_CONCURRENT);
return ValidatorState::ValidateAtQueueSubmit(queue_state, state_data, src_queue_family, dst_queue_family, val);
}
// Type specific wrapper for image barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkImageMemoryBarrier &barrier, const IMAGE_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, func_name, cb_state, VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage),
state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkBufferMemoryBarrier &barrier, const BUFFER_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(
this, func_name, cb_state, VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
bool CoreChecks::ValidateBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &mem_barrier = pImageMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
auto image_data = GetImageState(mem_barrier.image);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data);
if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198",
"%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
}
if (image_data) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer."
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToImage(image_data, funcName, kVUIDUndefined);
const auto aspect_mask = mem_barrier.subresourceRange.aspectMask;
skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName);
const std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier.subresourceRange, funcName, param_name.c_str());
}
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
const auto &mem_barrier = pBufferMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(mem_barrier.buffer);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state);
if (buffer_state) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer"
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, kVUIDUndefined);
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier.offset >= buffer_size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(), HandleToUint64(mem_barrier.offset),
HandleToUint64(buffer_size));
} else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64
" whose sum is greater than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(), HandleToUint64(mem_barrier.offset),
HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
}
}
}
skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount,
pImageMemBarriers);
return skip;
}
bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
size_t firstEventIndex, VkPipelineStageFlags sourceStageMask,
EventToStageMap *localEventToStageMap) {
bool skip = false;
VkPipelineStageFlags stageMask = 0;
const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
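    // Accumulate the stageMask each event was set with -- prefer the record local to this command buffer,
    // falling back to the global event state when the event was not set here.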
for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
auto event = pCB->events[event_index];
auto event_data = localEventToStageMap->find(event);
if (event_data != localEventToStageMap->end()) {
stageMask |= event_data->second;
} else {
auto global_event_data = state_data->GetEventState(event);
if (!global_event_data) {
skip |= log_msg(state_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent,
"%s cannot be waited on if it has never been set.",
state_data->report_data->FormatHandle(event).c_str());
} else {
stageMask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= log_msg(state_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%X.",
sourceStageMask, stageMask);
}
return skip;
}
// Note that we only check bits that HAVE required queue flags -- don't-care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
{VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
const char *error_code) const {
bool skip = false;
// Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
for (const auto &item : stage_flag_bit_array) {
if (stage_mask & item) {
if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), error_code,
"%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
}
}
}
return skip;
}
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
if (!op_check(pool, barriers + b)) return false;
}
return true;
}
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
auto pool = cb_state->command_pool.get();
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllAcquire;
}
}
return op_type;
}
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags source_stage_mask,
VkPipelineStageFlags dest_stage_mask,
BarrierOperationsType barrier_op_type, const char *function,
const char *error_code) const {
bool skip = false;
uint32_t queue_family_index = cb_state->command_pool->queueFamilyIndex;
auto physical_device_state = GetPhysicalDeviceState();
// Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
// specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
// that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
if (queue_family_index < physical_device_state->queue_family_properties.size()) {
VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
// Only check the source stage mask if any barriers aren't "acquire ownership"
if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function,
"srcStageMask", error_code);
}
// Only check the dest stage mask if any barriers aren't "release ownership"
if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function,
"dstStageMask", error_code);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type,
"vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164");
skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-01159",
"VUID-vkCmdWaitEvents-srcStageMask-01161", "VUID-vkCmdWaitEvents-srcStageMask-02111",
"VUID-vkCmdWaitEvents-srcStageMask-02112");
skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160",
"VUID-vkCmdWaitEvents-dstStageMask-01162", "VUID-vkCmdWaitEvents-dstStageMask-02113",
"VUID-vkCmdWaitEvents-dstStageMask-02114");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // StateTracker::PreCallRecordCmdWaitEvents (called below) will append the events to cb_state->events.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, sourceStageMask](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarrierValidationInfo("vkCmdWaitEvents", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type,
"vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
skip |=
ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-01168",
"VUID-vkCmdPipelineBarrier-srcStageMask-01170", "VUID-vkCmdPipelineBarrier-srcStageMask-02115",
"VUID-vkCmdPipelineBarrier-srcStageMask-02116");
skip |=
ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-01169",
"VUID-vkCmdPipelineBarrier-dstStageMask-01171", "VUID-vkCmdPipelineBarrier-dstStageMask-02117",
"VUID-vkCmdPipelineBarrier-dstStageMask-02118");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::EnqueueSubmitTimeValidateImageBarrierAttachment(const char *func_name, CMD_BUFFER_STATE *cb_state,
uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
// Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
if ((cb_state->activeRenderPass) && (VK_NULL_HANDLE == cb_state->activeFramebuffer) &&
(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level)) {
const auto active_subpass = cb_state->activeSubpass;
const auto rp_state = cb_state->activeRenderPass;
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
const VulkanTypedHandle rp_handle(rp_state->renderPass, kVulkanObjectTypeRenderPass);
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &img_barrier = pImageMemBarriers[i];
            // Secondary CB case w/o FB specified -- delay validation until the framebuffer is known
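            // Capture by value ([=]) so the barrier and subpass data remain valid when the deferred check runs
            // later, once the framebuffer is known.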
cb_state->cmd_execute_commands_functions.emplace_back([=](const CMD_BUFFER_STATE *primary_cb, VkFramebuffer fb) {
return ValidateImageBarrierAttachment(func_name, cb_state, fb, active_subpass, sub_desc, rp_handle, i, img_barrier);
});
}
}
}
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
const char *func_name = "vkCmdPipelineBarrier";
RecordBarrierValidationInfo(func_name, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(func_name, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd,
const char *cmd_name, const char *vuid_queue_flags, const char *vuid_queue_feedback,
const char *vuid_queue_occlusion, const char *vuid_precise,
const char *vuid_query_count) const {
bool skip = false;
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQuery-queryType-02804",
"%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
}
// There are tighter queue constraints to test for certain query pools
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_feedback);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_occlusion);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!cb_state->performance_lock_acquired) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer),
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223" : "VUID-vkCmdBeginQuery-queryPool-03223",
"%s: profiling lock must be held before vkBeginCommandBuffer is called on "
"a command buffer where performance queries are recorded.",
cmd_name);
}
if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer),
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224" : "VUID-vkCmdBeginQuery-queryPool-03224",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
"command in the command buffer.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer),
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225" : "VUID-vkCmdBeginQuery-queryPool-03225",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags);
if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
if (!enabled_features.core.occlusionQueryPrecise) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
cmd_name);
}
if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
}
}
if (query_obj.query >= query_pool_ci.queryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_query_count,
"%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
}
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
VkFlags flags) const {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, slot);
return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()",
"VUID-vkCmdBeginQuery-commandBuffer-cmdpool", "VUID-vkCmdBeginQuery-queryType-02327",
"VUID-vkCmdBeginQuery-queryType-00803", "VUID-vkCmdBeginQuery-queryType-00800",
"VUID-vkCmdBeginQuery-query-00802");
}
bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
const char *func_name, QueryMap *localQueryToStateMap) {
bool skip = false;
QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query);
if (state != QUERYSTATE_RESET) {
skip |= log_msg(state_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_QueryNotReset,
"%s: %s and query %" PRIu32
": query not reset. "
"After query pool creation, each query must be reset before it is used. "
"Queries must also be reset between uses.",
func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
return skip;
}
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
bool do_validate, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, localQueryToStateMap);
});
}
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
if (disabled.query_validation) return;
QueryObject query_obj = {queryPool, slot};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd,
const char *cmd_name, const char *vuid_queue_flags, const char *vuid_active_queries) const {
bool skip = false;
if (!cb_state->activeQueries.count(query_obj)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_active_queries,
"%s: Ending a query before it was started: %s, index %d.", cmd_name,
report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && query_pool_state->has_perf_scope_render_pass &&
cb_state->activeRenderPass) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdEndQuery-queryPool-03228",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags);
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
if (disabled.query_validation) return false;
QueryObject query_obj = {queryPool, slot};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", "VUID-vkCmdEndQuery-commandBuffer-cmdpool",
"VUID-vkCmdEndQuery-None-01923");
}
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
return skip;
}
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
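    // Map the query's lifecycle state plus the requested result flags to the kind of data a results copy can return.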
switch (state) {
case QUERYSTATE_UNKNOWN:
return QUERYRESULT_UNKNOWN;
case QUERYSTATE_RESET:
case QUERYSTATE_RUNNING:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
} else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_NO_DATA;
}
case QUERYSTATE_ENDED:
if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_MAYBE_NO_DATA;
}
case QUERYSTATE_AVAILABLE:
return QUERYRESULT_SOME_DATA;
}
assert(false);
return QUERYRESULT_UNKNOWN;
}
bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
bool skip = false;
for (uint32_t i = 0; i < queryCount; i++) {
QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i);
QueryResultType result_type = GetQueryResultType(state, flags);
if (result_type != QUERYRESULT_SOME_DATA) {
skip |= log_msg(state_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_InvalidQuery,
"vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i,
string_QueryResultType(result_type));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) const {
if (disabled.query_validation) return false;
const auto cb_state = GetCBState(commandBuffer);
const auto dst_buff_state = GetBufferState(dstBuffer);
assert(cb_state);
assert(dst_buff_state);
bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
stride, "dstOffset", dstOffset, flags);
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
auto query_pool_state_iter = queryPoolMap.find(queryPool);
if (query_pool_state_iter != queryPoolMap.end()) {
auto query_pool_state = query_pool_state_iter->second.get();
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdCopyQueryPoolResults-queryType-03232",
"vkCmdCopyQueryPoolResults called with query pool %s but "
"VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies "
"is not set.",
report_data->FormatHandle(queryPool).c_str());
}
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
if (disabled.query_validation) return;
auto cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back(
[commandBuffer, queryPool, firstQuery, queryCount, flags](const ValidationStateTracker *device_data, bool do_validate,
QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, flags,
localQueryToStateMap);
});
}
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
const void *pValues) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPushConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()");
if (0 == stageFlags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
"vkCmdPushConstants() call has no stageFlags set.");
}
// Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
// stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
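            // Only ranges that fully contain [offset, offset + size) participate in the stageFlags checks below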
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
// "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
"), must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
"), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.",
(uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
report_data->FormatHandle(layout).c_str());
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
// "VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): stageFlags = 0x%" PRIx32
", VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain "
"stageFlags 0x%" PRIx32 ".",
(uint32_t)stageFlags, report_data->FormatHandle(layout).c_str(), offset, size, missing_stages);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()",
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
"VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
return skip;
}
void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled.query_validation) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, localQueryToStateMap);
});
}
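// Verify that each attachment referenced in 'attachments' was created with the given usage flag, using either the bound
// image views or, for an imageless framebuffer, the VkFramebufferAttachmentsCreateInfoKHR image info in the pNext chain.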
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2KHR *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
if (ici != nullptr) {
if ((ici->usage & usage_flag) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfoKHR *fbaci =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) {
if (!enabled_features.imageless_framebuffer_features.imagelessFramebuffer) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (pFramebufferAttachmentsCreateInfo == nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but no instance of VkFramebufferAttachmentsCreateInfoKHR is present in the pNext chain.");
} else {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != 0 &&
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfoKHR attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2KHR *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(image_views[i]), "VUID-VkFramebufferCreateInfo-flags-03188",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
if (!(rpci->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
rpci->pAttachments[i].finalLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT)) {
if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
(mip_height < pCreateInfo->height)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00882",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
"smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
"attachment #%u, framebuffer:\n"
"width: %u, %u\n"
"height: %u, %u\n"
"layerCount: %u, %u\n",
i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
} else {
if (device_extensions.vk_ext_fragment_density_map) {
uint32_t ceiling_width = (uint32_t)ceil(
(float)pCreateInfo->width /
std::max((float)phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width,
1.0f));
if (mip_width < ceiling_width) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-02555",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding the ceiling of framebuffer width / "
"maxFragmentDensityTexelSize.width "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"width: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_width, ceiling_width);
}
uint32_t ceiling_height = (uint32_t)ceil(
(float)pCreateInfo->height /
std::max(
(float)phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height,
1.0f));
if (mip_height < ceiling_height) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-02556",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding the ceiling of framebuffer height / "
"maxFragmentDensityTexelSize.height "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"height: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_height, ceiling_height);
}
}
}
if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
}
}
} else if (pFramebufferAttachmentsCreateInfo) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
bool formatFound = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
formatFound = true;
}
}
if (!formatFound) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const char *mismatchedLayersNoMultiviewVuid = device_extensions.vk_khr_multiview
? "VUID-VkFramebufferCreateInfo-renderPass-03199"
: "VUID-VkFramebufferCreateInfo-flags-03200";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
mismatchedLayersNoMultiviewVuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
if (!device_extensions.vk_ext_fragment_density_map) {
if (aii.width < pCreateInfo->width) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03192",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03193",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolveKHR *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(rpci->pSubpasses[i].pNext);
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr) {
skip |= MatchUsage(1, pDepthStencilResolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
}
if (device_extensions.vk_khr_multiview) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolveKHR *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr &&
pDepthStencilResolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = pDepthStencilResolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
// Verify input attachments:
skip |= MatchUsage(rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
}
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width <= 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height <= 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers <= 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const {
// TODO : Verify that renderPass FB is created with is compatible with FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(pCreateInfo);
return skip;
}
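// Search the subpass DAG backwards from 'index' for a path to 'dependent', tracking visited nodes to avoid reprocessing.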
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
std::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
return true;
}
return false;
}
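// For every subpass in 'dependent_subpasses' that shares the attachment, verify that an explicit or transitive dependency
// with 'subpass' exists; read-only to read-only usage does not require one.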
bool CoreChecks::CheckDependencyExists(const uint32_t subpass, const VkImageLayout layout,
const std::vector<SubpassLayout> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
bool bImageLayoutReadOnly = IsImageLayoutReadOnly(layout);
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
const SubpassLayout &sp = dependent_subpasses[k];
if (subpass == sp.index) continue;
if (bImageLayoutReadOnly && IsImageLayoutReadOnly(sp.layout)) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no direct dependency exists, an implicit one still might. If not, throw an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
}
}
return result;
}
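// Recursively determine whether 'attachment' is written by subpass 'index' or any of its predecessors; if a predecessor
// wrote it, the intermediate subpasses must list it in pPreserveAttachments.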
bool CoreChecks::CheckPreserved(const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index, const uint32_t attachment,
const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
// If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
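// Overlap test for two [offset, offset + size) ranges, used below for subresource ranges and memory bindings.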
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
((offset1 > offset2) && (offset1 < (offset2 + size2)));
}
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
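// Validate that the render pass declares the subpass dependencies its attachment usage requires: find attachments that
// alias or overlap in memory, record which subpasses read and write each one, and check that the needed dependencies and
// preserve attachments exist.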
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const pFramebufferInfo = framebuffer->createInfo.ptr();
auto const pCreateInfo = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
struct Attachment {
std::vector<SubpassLayout> outputs;
std::vector<SubpassLayout> inputs;
std::vector<uint32_t> overlapping;
};
std::vector<Attachment> attachments(pCreateInfo->attachmentCount);
// Find overlapping attachments
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
VkImageView viewi = pFramebufferInfo->pAttachments[i];
VkImageView viewj = pFramebufferInfo->pAttachments[j];
if (viewi == viewj) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
if (image_data_i->binding.mem == image_data_j->binding.mem &&
IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
image_data_j->binding.size)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
}
}
}
// For each attachment, find the subpasses that use it.
unordered_set<uint32_t> attachmentIndices;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
attachmentIndices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pInputAttachments[j].layout};
attachments[attachment].inputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].inputs.emplace_back(sp);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pColorAttachments[j].layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
attachmentIndices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
if (attachmentIndices.count(attachment)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
// If a dependency is needed, make sure one exists
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip);
CheckDependencyExists(i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs, subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(i, subpass.pDepthStencilAttachment->layout, attachments[attachment].outputs, subpass_to_node,
skip);
CheckDependencyExists(i, subpass.pDepthStencilAttachment->layout, attachments[attachment].inputs, subpass_to_node,
skip);
}
}
// Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved in every
// pass between the one that wrote it and this read.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
}
}
return skip;
}
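// Validate the structural rules for pCreateInfo->pDependencies: external and self-dependency restrictions, forward-only
// subpass ordering, view-local dependency flags, and logical pipeline stage ordering for self-dependencies.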
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
// any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03059",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDependency2KHR-dependencyFlags-03092",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i,
dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03091";
}
}
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
"disallowed to prevent cyclic dependencies.",
i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01930";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i,
dependency.viewOffset);
} else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags &&
pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
"specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
i, dependency.srcSubpass);
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
(GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage));
}
}
}
return skip;
}
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
const char *type) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
const char *vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: %s attachment %d must be less than the total number of attachments %d.", type, function_name,
attachment, attachment_count);
}
return skip;
}
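// Bitmask of the ways a single subpass can reference an attachment, used to detect conflicting uses within that subpass.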
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
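// Record that the subpass uses 'attachment' as 'new_use' in 'new_layout', flagging reuse of the attachment within the
// same subpass with a different layout or an incompatible combination of uses.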
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) const {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
if (uses & new_use) {
if (attachment_layouts[attachment] != new_layout) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass,
attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout));
}
} else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
/* Note: input attachments are assumed to be done first. */
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074"
: "VUID-VkSubpassDescription-pPreserveAttachments-00854";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment,
StringAttachmentType(uses), StringAttachmentType(new_use));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
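// Validate every subpass description's input/color/resolve/depth-stencil/preserve attachment references: index bounds,
// aspect masks, layouts, sample count consistency, and resolve attachment rules.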
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062"
: "VUID-VkSubpassDescription-pipelineBindPoint-00844";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input");
if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
vuid =
use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
function_name, i, j);
}
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_INPUT, attachment_ref.layout);
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01963";
skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_ref.attachment].format,
attachment_ref.aspectMask, function_name, vuid);
}
if (rp_version == RENDER_PASS_VERSION_2) {
// These are validated automatically as part of parameter validation for create renderpass 1
// as they are in a struct that only applies to input attachments - not so for v2.
// Check for 0
if (attachment_ref.aspectMask == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription2KHR-attachment-02800",
"%s: Input attachment (%d) aspect mask must not be 0.", function_name, j);
} else {
const VkImageAspectFlags valid_bits =
(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);
// Check for valid aspect mask bits
if (attachment_ref.aspectMask & ~valid_bits) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription2KHR-attachment-02799",
"%s: Input attachment (%d) aspect mask (0x%" PRIx32 ")is invalid.", function_name, j,
attachment_ref.aspectMask);
}
}
}
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
} else {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, "Preserve");
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
}
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03067"
: "VUID-VkSubpassDescription-pResolveAttachments-00849";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
function_name, i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
}
}
}
}
if (subpass.pDepthStencilAttachment) {
if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, subpass.pDepthStencilAttachment->attachment,
pCreateInfo->attachmentCount, "Depth");
if (subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts,
subpass.pDepthStencilAttachment->attachment, ATTACHMENT_DEPTH,
subpass.pDepthStencilAttachment->layout);
}
}
}
uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pColorAttachments[j];
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color");
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED && attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_COLOR, attachment_ref.layout);
VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_ref.attachment].samples;
if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
VkSampleCountFlagBits last_sample_count =
pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
if (current_sample_count != last_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03069"
: "VUID-VkSubpassDescription-pColorAttachments-01417";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
}
}
last_sample_count_attachment = j;
if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066"
: "VUID-VkSubpassDescription-pResolveAttachments-00848";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
function_name, i, attachment_ref.attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (device_extensions.vk_amd_mixed_attachment_samples) {
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u pColorAttachments[%u] has %s which is larger than "
"depth/stencil attachment %s.",
function_name, i, j,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
break;
}
}
if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u attempts to render to use a depth/stencil attachment with sample count that differs "
"from color attachment %u."
"The depth attachment ref has sample count %s, whereas color attachment ref %u has sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
string_VkSampleCountFlagBits(current_sample_count));
break;
}
}
}
if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065"
: "VUID-VkSubpassDescription-pResolveAttachments-00847";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
function_name, i, attachment_ref.attachment);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068"
: "VUID-VkSubpassDescription-pResolveAttachments-00850";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u pColorAttachments[%u] resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
function_name, i, j, color_desc.format, resolve_desc.format);
}
}
}
}
}
return skip;
}
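// Shared validation for vkCreateRenderPass and vkCreateRenderPass2KHR: attachment usage, the subpass dependency DAG,
// multiview and correlation masks, stage/access mask compatibility for each declared dependency, and attachment layouts.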
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
// TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
// ValidateLayouts.
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo);
skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);
// Validate multiview correlation and view masks
bool viewMaskZero = false;
bool viewMaskNonZero = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
if (subpass.viewMask != 0) {
viewMaskNonZero = true;
} else {
viewMaskZero = true;
}
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
(subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: The flags parameter of subpass description %u includes "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
function_name, i);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
if (viewMaskNonZero && viewMaskZero) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03058",
"%s: Some view masks are non-zero whilst others are zero.", function_name);
}
if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03057",
"%s: Multiview is not enabled but correlation masks are still provided", function_name);
}
}
uint32_t aggregated_cvms = 0;
for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056"
: "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
}
aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
if (rp_version == RENDER_PASS_VERSION_2) {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2KHR-srcStageMask-03080",
"VUID-VkSubpassDependency2KHR-srcStageMask-03082", "VUID-VkSubpassDependency2KHR-srcStageMask-02103",
"VUID-VkSubpassDependency2KHR-srcStageMask-02104");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2KHR-dstStageMask-03081",
"VUID-VkSubpassDependency2KHR-dstStageMask-03083", "VUID-VkSubpassDependency2KHR-dstStageMask-02105",
"VUID-VkSubpassDependency2KHR-dstStageMask-02106");
} else {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860",
"VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099",
"VUID-VkSubpassDependency-srcStageMask-02100");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861",
"VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101",
"VUID-VkSubpassDependency-dstStageMask-02102");
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
function_name, i, dependency.srcAccessMask, dependency.srcStageMask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
function_name, i, dependency.dstAccessMask, dependency.dstStageMask);
}
}
if (!skip) {
skip |= ValidateLayouts(rp_version, device, pCreateInfo);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
bool skip = false;
// Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
if (pMultiviewInfo) {
if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01928",
"Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount,
pMultiviewInfo->subpassCount);
} else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01929",
"Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount,
pMultiviewInfo->dependencyCount);
}
}
const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo =
lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
if (pInputAttachmentAspectInfo) {
for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) {
uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass;
uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex;
if (subpass >= pCreateInfo->subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01926",
"Subpass index %u specified by input attachment aspect info %u is greater than the subpass "
"count of %u for this render pass.",
subpass, i, pCreateInfo->subpassCount);
} else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01927",
"Input attachment index %u specified by input attachment aspect info %u is greater than the "
"input attachment count of %u for this subpass.",
attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
}
}
}
const VkRenderPassFragmentDensityMapCreateInfoEXT *pFragmentDensityMapInfo =
lvl_find_in_chain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
if (pFragmentDensityMapInfo) {
if (pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) {
if (pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
"fragmentDensityMapAttachment %u must be less than attachmentCount %u of for this render pass.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount);
} else {
if (!(pFragmentDensityMapInfo->fragmentDensityMapAttachment.layout ==
VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
pFragmentDensityMapInfo->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
"Layout of fragmentDensityMapAttachment %u' must be equal to "
"VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
if (!(pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_LOAD ||
pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
"FragmentDensityMapAttachment %u' must reference an attachment with a loadOp "
"equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
if (pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].storeOp !=
VK_ATTACHMENT_STORE_OP_DONT_CARE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
"FragmentDensityMapAttachment %u' must reference an attachment with a storeOp "
"equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
}
}
}
if (!skip) {
safe_VkRenderPassCreateInfo2KHR create_info_2;
ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr());
}
return skip;
}
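// Validate VkSubpassDescriptionDepthStencilResolveKHR structures chained to the subpass descriptions: attachment indices,
// sample counts, depth/stencil format compatibility, and supported resolve modes.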
static bool ValidateDepthStencilResolve(const debug_report_data *report_data,
const VkPhysicalDeviceDepthStencilResolvePropertiesKHR &depth_stencil_resolve_props,
const VkRenderPassCreateInfo2KHR *pCreateInfo) {
bool skip = false;
// If the pNext list of VkSubpassDescription2KHR includes a VkSubpassDescriptionDepthStencilResolveKHR structure,
// then that structure describes depth/stencil resolve operations for the subpass.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(subpass.pNext);
if (resolve == nullptr) {
continue;
}
const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_resolve_attachment_index =
(resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);
const bool ds_attachment_not_unused =
(subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_ds_attachment_index =
(ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);
if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03177",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR &&
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03178",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u, but both depth and stencil resolve modes are "
"VK_RESOLVE_MODE_NONE_KHR.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && valid_ds_attachment_index &&
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03179",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (valid_resolve_attachment_index &&
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03180",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
VkFormat pDepthStencilAttachmentFormat =
(valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
VkFormat pDepthStencilResolveAttachmentFormat =
(valid_resolve_attachment_index ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
if (valid_ds_attachment_index && valid_resolve_attachment_index &&
((FormatDepthSize(pDepthStencilAttachmentFormat) != FormatDepthSize(pDepthStencilResolveAttachmentFormat)) ||
(FormatDepthNumericalType(pDepthStencilAttachmentFormat) !=
FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat)))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03181",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has a depth component (size %u). The depth component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
i, resolve->pDepthStencilResolveAttachment->attachment,
FormatDepthSize(pDepthStencilResolveAttachmentFormat), FormatDepthSize(pDepthStencilAttachmentFormat));
}
if (valid_ds_attachment_index && valid_resolve_attachment_index &&
((FormatStencilSize(pDepthStencilAttachmentFormat) != FormatStencilSize(pDepthStencilResolveAttachmentFormat)) ||
(FormatStencilNumericalType(pDepthStencilAttachmentFormat) !=
FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat)))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03182",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
i, resolve->pDepthStencilResolveAttachment->attachment,
FormatStencilSize(pDepthStencilResolveAttachmentFormat), FormatStencilSize(pDepthStencilAttachmentFormat));
}
if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->depthResolveMode & depth_stencil_resolve_props.supportedDepthResolveModes)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-depthResolveMode-03183",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with invalid depthResolveMode=%u.",
i, resolve->depthResolveMode);
}
if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode & depth_stencil_resolve_props.supportedStencilResolveModes)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-stencilResolveMode-03184",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with invalid stencilResolveMode=%u.",
i, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
depth_stencil_resolve_props.independentResolve == VK_FALSE &&
depth_stencil_resolve_props.independentResolveNone == VK_FALSE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03185",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
i, resolve->depthResolveMode, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
depth_stencil_resolve_props.independentResolve == VK_FALSE &&
depth_stencil_resolve_props.independentResolveNone == VK_TRUE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03186",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
"one of them must be %u.",
i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
bool skip = false;
if (device_extensions.vk_khr_depth_stencil_resolve) {
skip |= ValidateDepthStencilResolve(report_data, phys_dev_ext_props.depth_stencil_resolve_props, pCreateInfo);
}
safe_VkRenderPassCreateInfo2KHR create_info_2(pCreateInfo);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr());
return skip;
}
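// Flag an error if the given command buffer is not a primary command buffer.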
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
cmd_name);
}
return skip;
}
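// Verify that the renderArea of a render pass begin lies within the bounds of the framebuffer.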
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const {
bool skip = false;
const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
skip |= static_cast<bool>(log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
}
return skip;
}
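// For imageless framebuffers, verify the image views provided in VkRenderPassAttachmentBeginInfoKHR against the
// framebuffer's VkFramebufferAttachmentImageInfoKHR entries and the render pass attachment descriptions.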
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo) const {
bool skip = false;
const VkRenderPassAttachmentBeginInfoKHR *pRenderPassAttachmentBeginInfo =
lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBeginInfo->pNext);
if (pRenderPassAttachmentBeginInfo && pRenderPassAttachmentBeginInfo->attachmentCount != 0) {
const safe_VkFramebufferCreateInfo *pFramebufferCreateInfo =
&GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pFramebufferCreateInfo->pNext);
if ((pFramebufferCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03207",
"VkRenderPassBeginInfo: Image views specified at render pass begin, but framebuffer not created with "
"VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR");
} else if (pFramebufferAttachmentsCreateInfo) {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pRenderPassAttachmentBeginInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03208",
"VkRenderPassBeginInfo: %u image views specified at render pass begin, but framebuffer "
"created expecting %u attachments",
pRenderPassAttachmentBeginInfo->attachmentCount,
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
} else {
const safe_VkRenderPassCreateInfo2KHR *pRenderPassCreateInfo =
&GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
for (uint32_t i = 0; i < pRenderPassAttachmentBeginInfo->attachmentCount; ++i) {
const VkImageViewCreateInfo *pImageViewCreateInfo =
&GetImageViewState(pRenderPassAttachmentBeginInfo->pAttachments[i])->create_info;
const VkFramebufferAttachmentImageInfoKHR *pFramebufferAttachmentImageInfo =
&pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
const VkImageCreateInfo *pImageCreateInfo = &GetImageState(pImageViewCreateInfo->image)->createInfo;
if (pFramebufferAttachmentImageInfo->flags != pImageCreateInfo->flags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03209",
"VkRenderPassBeginInfo: Image view #%u created from an image with flags set as 0x%X, "
"but image info #%u used to create the framebuffer had flags set as 0x%X",
i, pImageCreateInfo->flags, i, pFramebufferAttachmentImageInfo->flags);
}
if (pFramebufferAttachmentImageInfo->usage != pImageCreateInfo->usage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03210",
"VkRenderPassBeginInfo: Image view #%u created from an image with usage set as 0x%X, "
"but image info #%u used to create the framebuffer had usage set as 0x%X",
i, pImageCreateInfo->usage, i, pFramebufferAttachmentImageInfo->usage);
}
if (pFramebufferAttachmentImageInfo->width != pImageCreateInfo->extent.width) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03211",
"VkRenderPassBeginInfo: Image view #%u created from an image with width set as %u, "
"but image info #%u used to create the framebuffer had width set as %u",
i, pImageCreateInfo->extent.width, i, pFramebufferAttachmentImageInfo->width);
}
if (pFramebufferAttachmentImageInfo->height != pImageCreateInfo->extent.height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03212",
"VkRenderPassBeginInfo: Image view #%u created from an image with height set as %u, "
"but image info #%u used to create the framebuffer had height set as %u",
i, pImageCreateInfo->extent.height, i, pFramebufferAttachmentImageInfo->height);
}
if (pFramebufferAttachmentImageInfo->layerCount != pImageViewCreateInfo->subresourceRange.layerCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03213",
"VkRenderPassBeginInfo: Image view #%u created with a subresource range with a layerCount of %u, "
"but image info #%u used to create the framebuffer had layerCount set as %u",
i, pImageViewCreateInfo->subresourceRange.layerCount, i, pFramebufferAttachmentImageInfo->layerCount);
}
const VkImageFormatListCreateInfoKHR *pImageFormatListCreateInfo =
lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pImageCreateInfo->pNext);
if (pImageFormatListCreateInfo) {
if (pImageFormatListCreateInfo->viewFormatCount != pFramebufferAttachmentImageInfo->viewFormatCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03214",
"VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
"but image info #%u used to create the framebuffer had viewFormatCount set as %u",
i, pImageFormatListCreateInfo->viewFormatCount, i,
pFramebufferAttachmentImageInfo->viewFormatCount);
}
for (uint32_t j = 0; j < pImageFormatListCreateInfo->viewFormatCount; ++j) {
bool formatFound = false;
for (uint32_t k = 0; k < pFramebufferAttachmentImageInfo->viewFormatCount; ++k) {
if (pImageFormatListCreateInfo->pViewFormats[j] ==
pFramebufferAttachmentImageInfo->pViewFormats[k]) {
formatFound = true;
}
}
if (!formatFound) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03215",
"VkRenderPassBeginInfo: Image view #%u created with an image including the format "
"%s in its view format list, "
"but image info #%u used to create the framebuffer does not include this format",
i, string_VkFormat(pImageFormatListCreateInfo->pViewFormats[j]), i);
}
}
}
if (pRenderPassCreateInfo->pAttachments[i].format != pImageViewCreateInfo->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03216",
"VkRenderPassBeginInfo: Image view #%u created with a format of %s, "
"but render pass attachment description #%u created with a format of %s",
i, string_VkFormat(pImageViewCreateInfo->format), i,
string_VkFormat(pRenderPassCreateInfo->pAttachments[i].format));
}
if (pRenderPassCreateInfo->pAttachments[i].samples != pImageCreateInfo->samples) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03217",
"VkRenderPassBeginInfo: Image view #%u created with an image with %s samples, "
"but render pass attachment description #%u created with %s samples",
i, string_VkSampleCountFlagBits(pImageCreateInfo->samples), i,
string_VkSampleCountFlagBits(pRenderPassCreateInfo->pAttachments[i].samples));
}
if (pImageViewCreateInfo->subresourceRange.levelCount != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(pRenderPassAttachmentBeginInfo->pAttachments[i]),
"VUID-VkRenderPassAttachmentBeginInfoKHR-pAttachments-03218",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with multiple (%u) mip levels.", i,
pImageViewCreateInfo->subresourceRange.levelCount);
}
if (((pImageViewCreateInfo->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.r != VK_COMPONENT_SWIZZLE_R)) ||
((pImageViewCreateInfo->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.g != VK_COMPONENT_SWIZZLE_G)) ||
((pImageViewCreateInfo->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.b != VK_COMPONENT_SWIZZLE_B)) ||
((pImageViewCreateInfo->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.a != VK_COMPONENT_SWIZZLE_A))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(pRenderPassAttachmentBeginInfo->pAttachments[i]),
"VUID-VkRenderPassAttachmentBeginInfoKHR-pAttachments-03219",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(pImageViewCreateInfo->components.r),
string_VkComponentSwizzle(pImageViewCreateInfo->components.g),
string_VkComponentSwizzle(pImageViewCreateInfo->components.b),
string_VkComponentSwizzle(pImageViewCreateInfo->components.a));
}
}
}
}
}
return skip;
}
// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
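// Shared validation for vkCmdBeginRenderPass() and vkCmdBeginRenderPass2KHR().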
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
const VkRenderPassBeginInfo *pRenderPassBegin) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2KHR()" : "vkCmdBeginRenderPass()";
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
// Handle extension struct from EXT_sample_locations
const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo =
lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
if (pSampleLocationsBeginInfo) {
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) {
if (pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex >=
render_pass_state->createInfo.attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
"Attachment index %u specified by attachment sample locations %u is greater than the "
"attachment count of %u for the render pass being begun.",
pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex, i,
render_pass_state->createInfo.attachmentCount);
}
}
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) {
if (pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex >=
render_pass_state->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
"Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
"of %u for the render pass being begun.",
pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex, i,
render_pass_state->createInfo.subpassCount);
}
}
}
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
"must be at least %u entries in pClearValues array to account for the highest index attachment in "
"%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
"attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
"that aren't cleared they will be ignored.",
function_name, pRenderPassBegin->clearValueCount, clear_op_size,
report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
}
skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin);
skip |= VerifyRenderAreaBounds(pRenderPassBegin);
skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
GetFramebufferState(pRenderPassBegin->framebuffer));
if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
}
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
skip |= InsideRenderPass(cb_state, function_name, vuid);
skip |= ValidateDependencies(framebuffer, render_pass_state);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2KHR : CMD_BEGINRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
}
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(
chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass),
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
skip |= ValidateDeviceMaskToCommandBuffer(
cb_state, chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass),
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
"deviceRenderAreaCount[%" PRIu32 "] is invaild. Physical device count is %" PRIu32 ".",
chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
}
}
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
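// Shared validation for vkCmdNextSubpass() and vkCmdNextSubpass2KHR().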
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdNextSubpass2KHR()" : "vkCmdNextSubpass()";
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2KHR : CMD_NEXTSUBPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-renderpass" : "VUID-vkCmdNextSubpass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
if (cb_state->activeSubpass == subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid, "%s: Attempted to advance beyond final subpass.", function_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass, cb_state->activeSubpass,
GetFramebufferState(cb_state->activeRenderPassBeginInfo.framebuffer));
}
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
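// Shared validation for vkCmdEndRenderPass() and vkCmdEndRenderPass2KHR().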
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass;
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid, "%s: Called before reaching final subpass.", function_name);
}
}
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-renderpass" : "VUID-vkCmdEndRenderPass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2KHR : CMD_ENDRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(cb_state->activeFramebuffer);
TransitionFinalSubpassLayouts(cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
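// If a secondary command buffer inherits a framebuffer, it must match the primary command buffer's active framebuffer.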
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
const CMD_BUFFER_STATE *pSubCB, const char *caller) const {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
" that is not the same as the primary command buffer's current active %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
report_data->FormatHandle(primary_fb).c_str());
}
auto fb = GetFramebufferState(secondary_fb);
if (!fb) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
return skip;
}
}
return skip;
}
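// Validate a secondary command buffer's query and command pool state against the primary command buffer that executes it.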
bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const {
bool skip = false;
unordered_set<int> activeTypes;
if (!disabled.query_validation) {
for (auto queryObject : pCB->activeQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state) {
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmdBufStatistics & query_pool_state->createInfo.pipelineStatistics) != cmdBufStatistics) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104",
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str());
}
}
activeTypes.insert(query_pool_state->createInfo.queryType);
}
}
for (auto queryObject : pSubCB->startedQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state && activeTypes.count(query_pool_state->createInfo.queryType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
" of type %d but a query of that type has been started on secondary %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str(), query_pool_state->createInfo.queryType,
report_data->FormatHandle(pSubCB->commandBuffer).c_str());
}
}
}
auto primary_pool = pCB->command_pool.get();
auto secondary_pool = pSubCB->command_pool.get();
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
"vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
"%s created in queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
}
return skip;
}
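// Validate the secondary command buffers passed to vkCmdExecuteCommands() against the recording primary command buffer.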
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const CMD_BUFFER_STATE *sub_cb_state = NULL;
std::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers;
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
"cmd buffers in pCommandBuffers array must be secondary.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
} else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
if (cb_state->activeRenderPass &&
!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary %s is executed within a %s "
"instance scope, but the Secondary Command Buffer does not have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
} else if (!cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
"vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
"instance scope, but the Secondary Command Buffer does have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
} else if (cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// Make sure render pass is compatible with parent command buffer pass if has continue
if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
skip |= ValidateRenderPassCompatibility(
"primary command buffer", cb_state->activeRenderPass, "secondary command buffer", secondary_rp_state,
"vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
// Inherit primary's activeFramebuffer and while running validate functions
for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
skip |= function(cb_state, cb_state->activeFramebuffer);
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (sub_cb_state->in_use.load()) {
// TODO: Find some way to differentiate between the -00090 and -00091 conditions
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090",
"Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
// We use a const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer
if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00092",
"Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set if previously executed in %s",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
const auto insert_pair = linked_command_buffers.insert(sub_cb_state);
if (!insert_pair.second) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00093",
"Cannot duplicate %s in pCommandBuffers without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary %s does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"%s to be treated as if it does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
}
if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
"inherited queries not supported on this device.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
}
// Validate initial layout uses vs. the primary cmd buffer state
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// initial layout usage of secondary command buffer resources must match the parent command buffer
const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
// Const getter can be null in which case we have nothing to check against for this image...
if (!cb_subres_map) continue;
const auto &sub_cb_subres_map = sub_layout_map_entry.second;
// Validate the initial_uses, that they match the current state of the primary cb, or absent a current state,
// that they match any initial_layout.
for (auto it_init = sub_cb_subres_map->BeginInitialUse(); !it_init.AtEnd(); ++it_init) {
const auto &sub_layout = (*it_init).layout;
if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial
const auto &subresource = (*it_init).subresource;
// Look up the current layout (if any)
VkImageLayout cb_layout = cb_subres_map->GetSubresourceLayout(subresource);
const char *layout_type = "current";
if (cb_layout == kInvalidLayout) {
// Find initial layout (if any)
cb_layout = cb_subres_map->GetSubresourceInitialLayout(subresource);
layout_type = "initial";
}
if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
"mip level %u) which expects layout %s--instead, image %s layout is %s.",
"vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
string_VkImageLayout(cb_layout));
}
}
}
}
skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
return skip;
}
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
VkFlags flags, void **ppData) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
skip |= ValidateMapMemRange(mem_info, offset, size);
}
return skip;
}
void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
void **ppData, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordMapMemory(device, mem, offset, size, flags, ppData, result);
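    // Set up the guard-banded shadow copy of the mapping that core checks use to detect out-of-bounds writes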
InitializeShadowMemory(mem, offset, size, ppData);
}
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info && !mem_info->mapped_range.size) {
// Valid Usage: memory must currently be mapped
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
report_data->FormatHandle(mem).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
// Only core checks uses the shadow copy, clear that up here
auto mem_info = GetDevMemState(mem);
if (mem_info && mem_info->shadow_copy_base) {
free(mem_info->shadow_copy_base);
mem_info->shadow_copy_base = nullptr;
mem_info->shadow_copy = nullptr;
mem_info->shadow_pad_size = 0;
}
StateTracker::PreCallRecordUnmapMemory(device, mem);
}
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetDevMemState(pMemRanges[i].memory);
if (mem_info) {
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mapped_range.offset > pMemRanges[i].offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset),
static_cast<size_t>(mem_info->mapped_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mapped_range.offset + mem_info->mapped_range.size);
if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) const {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
if (mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mapped_range.size != VK_WHOLE_SIZE)
? mem_info->mapped_range.size
: (mem_info->alloc_info.allocationSize - mem_info->mapped_range.offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
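                // The shadow copy brackets the user's data with shadow_pad_size fill bytes on each side; any byte that no
                // longer equals NoncoherentMemoryFillValue indicates the application wrote outside the mapped range.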
for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap,
"Memory underflow was detected on %s.",
report_data->FormatHandle(mem_ranges[i].memory).c_str());
}
}
for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap,
"Memory overflow was detected on %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str());
}
}
memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
}
}
}
return skip;
}
void CoreChecks::CopyNoncoherentMemoryFromDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info && mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mapped_range.size != VK_WHOLE_SIZE)
? mem_info->mapped_range.size
: (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
}
}
}
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) const {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].offset, atom_size);
}
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
if ((mem_ranges[i].size != VK_WHOLE_SIZE) &&
(mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
(SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].size, atom_size);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateAndCopyNoncoherentMemoryToDriver(memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges, VkResult result) {
if (VK_SUCCESS == result) {
// Update our shadow copy with modified driver data
CopyNoncoherentMemoryFromDriver(memRangeCount, pMemRanges);
}
}
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkGetDeviceMemoryCommitment-memory-00690",
"Querying commitment for memory without VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateBindImageMemory(const VkBindImageMemoryInfo &bindInfo, const char *api_name) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(bindInfo.image);
if (image_state) {
// Track objects tied to memory
uint64_t image_handle = HandleToUint64(bindInfo.image);
skip = ValidateSetMemBinding(bindInfo.memory, VulkanTypedHandle(bindInfo.image, kVulkanObjectTypeImage), api_name);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (image_state->external_format_android) {
if (image_state->memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
kVUID_Core_BindImage_InvalidMemReqQuery,
"%s: Must not call vkGetImageMemoryRequirements on %s that will be bound to an external "
"Android hardware buffer.",
api_name, report_data->FormatHandle(bindInfo.image).c_str());
}
return skip;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
// Validate bound memory range information
const auto mem_info = GetDevMemState(bindInfo.memory);
if (mem_info) {
skip |= ValidateInsertImageMemoryRange(bindInfo.image, mem_info, bindInfo.memoryOffset, image_state->requirements,
image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
skip |= ValidateMemoryTypes(mem_info, image_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindImageMemory-memory-01047");
}
// Validate memory requirements alignment
if (SafeModulo(bindInfo.memoryOffset, image_state->requirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-vkBindImageMemory-memoryOffset-01048",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, bindInfo.memoryOffset, image_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (image_state->requirements.size > mem_info->alloc_info.allocationSize - bindInfo.memoryOffset) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-vkBindImageMemory-size-01049",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, mem_info->alloc_info.allocationSize - bindInfo.memoryOffset, image_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated) {
if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
const auto orig_image_state = GetImageState(mem_info->dedicated_image);
const auto current_image_state = GetImageState(bindInfo.image);
if ((bindInfo.memoryOffset != 0) || !orig_image_state || !current_image_state ||
!current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
orig_image_state->createInfo)) {
const char *validation_error;
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
validation_error = "VUID-vkBindImageMemory-memory-02629";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-02631";
}
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR:: %s must compatible "
"with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(), bindInfo.memoryOffset);
}
} else {
if ((bindInfo.memoryOffset != 0) || (mem_info->dedicated_image != bindInfo.image)) {
const char *validation_error;
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01903";
}
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR:: %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(), bindInfo.memoryOffset);
}
}
}
}
const auto swapchain_info = lvl_find_in_chain<VkBindImageMemorySwapchainInfoKHR>(bindInfo.pNext);
if (swapchain_info) {
if (bindInfo.memory != VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.", api_name,
report_data->FormatHandle(bindInfo.memory).c_str());
}
if (image_state->create_from_swapchain != swapchain_info->swapchain) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_BindImageMemory_Swapchain,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(swapchain_info->swapchain).c_str());
}
const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
"%s: imageIndex (%i) is out of bounds of %s images (size: %i)", api_name, swapchain_info->imageIndex,
report_data->FormatHandle(swapchain_info->swapchain).c_str(), (int)swapchain_state->images.size());
}
} else {
if (image_state->create_from_swapchain) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-image-01630",
"%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.", api_name);
}
if (!mem_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", api_name,
report_data->FormatHandle(bindInfo.memory).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
VkBindImageMemoryInfo bindInfo = {};
bindInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bindInfo.image = image;
bindInfo.memory = mem;
bindInfo.memoryOffset = memoryOffset;
return ValidateBindImageMemory(bindInfo, "vkBindImageMemory()");
}
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) const {
bool skip = false;
char api_name[128];
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
skip |= ValidateBindImageMemory(pBindInfos[i], api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) const {
bool skip = false;
char api_name[128];
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindImageMemory(pBindInfos[i], api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->write_in_use) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress,
"Cannot call vkSetEvent() on %s that is already in use by a command buffer.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) const {
const auto queue_data = GetQueueState(queue);
const auto pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence);
if (skip) {
return true;
}
const auto queueFlags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
if (!(queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
"VUID-vkQueueBindSparse-queuetype",
"Attempting vkQueueBindSparse on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.");
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
unordered_map<VkSemaphore, std::set<uint64_t>> timeline_values;
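    // Track the simulated signal state of binary semaphores across all bind infos so that wait-before-signal and
    // double-signal hazards within this single submission are caught; timeline_values accumulates the payload values
    // referenced per timeline semaphore for the max-difference check in the second pass below.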
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        auto timeline_semaphore_submit_info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(bindInfo.pNext);
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
const auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: %s is a timeline semaphore, but pBindInfo does not"
"include an instance of VkTimelineSemaphoreSubmitInfoKHR",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
bindInfo.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkBindSparseInfo-pNext-03247",
"VkQueueBindSparse: %s is a timeline semaphore, it contains an instance of"
"VkTimelineSemaphoreSubmitInfoKHR, but waitSemaphoreValueCount is different than "
"waitSemaphoreCount",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
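            // Collect the referenced wait value (seeded with the semaphore's current payload) for later range checking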
            if (!skip && pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info) {
auto &values = timeline_values[semaphore];
if (values.empty()) {
values.insert(pSemaphore->payload);
}
values.insert(timeline_semaphore_submit_info->pWaitSemaphoreValues[i]);
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
const auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: %s is a timeline semaphore, but pBindInfo does not"
"include an instance of VkTimelineSemaphoreSubmitInfoKHR",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= pSemaphore->payload) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
"VkQueueBindSparse: signal value in %s must be greater than current timeline semaphore %s value",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
bindInfo.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-VkBindSparseInfo-pNext-03248",
"VkQueueBindSparse: %s is a timeline semaphore, it contains an instance of"
"VkTimelineSemaphoreSubmitInfoKHR, but signalSemaphoreValueCount is different than "
"signalSemaphoreCount",
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s that was previously signaled by %s but has not since "
"been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
            if (!skip && pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info) {
auto &values = timeline_values[semaphore];
if (values.empty()) {
values.insert(pSemaphore->payload);
}
values.insert(timeline_semaphore_submit_info->pSignalSemaphoreValues[i]);
}
}
}
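    // Second pass: with all wait/signal values per timeline semaphore collected above, check that none of them is too
    // far from the others (maxTimelineSemaphoreValueDifference).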
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo *bindInfo = &pBindInfo[bindIdx];
auto *info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(bindInfo->pNext);
for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(queue, semaphore, info ? info->pWaitSemaphoreValues[i] : 0,
&timeline_values, "VkQueueBindSparse",
"VUID-VkBindSparseInfo-pWaitSemaphores-03250");
}
for (uint32_t i = 0; i < bindInfo->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(queue, semaphore, info ? info->pSignalSemaphoreValues[i] : 0,
&timeline_values, "VkQueueBindSparse",
"VUID-VkBindSparseInfo-pSignalSemaphores-03251");
}
}
return skip;
}
bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfoKHR *pSignalInfo) const {
bool skip = false;
const auto pSemaphore = GetSemaphoreState(pSignalInfo->semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(pSignalInfo->semaphore), "VUID-VkSemaphoreSignalInfoKHR-semaphore-03257",
"VkSignalSemaphoreKHR: semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE_KHR type",
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
return skip;
}
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
bool skip = false;
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node) {
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined);
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
bool CoreChecks::ValidateImportFence(VkFence fence, const char *caller_name) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
kVUIDUndefined, "Cannot call %s on %s that is currently in use.", caller_name,
report_data->FormatHandle(fence).c_str());
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
return ValidateImportFence(pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
}
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name))
return true;
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
return true;
}
if (old_swapchain_state->retired) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name))
return true;
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height))
return true;
}
auto physical_device_state = GetPhysicalDeviceState();
bool skip = false;
VkSurfaceTransformFlagBitsKHR currentTransform = physical_device_state->surfaceCapabilities.currentTransform;
if ((pCreateInfo->preTransform & currentTransform) != pCreateInfo->preTransform) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physical_device), kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(currentTransform));
}
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
return true;
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str()))
return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str()))
return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
return true;
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageUsage-01276",
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
return true;
}
if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surfaceInfo = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surfaceInfo.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surfaceCapabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surfaceCapabilities.pNext = &surfaceProtectedCapabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surfaceInfo, &surfaceCapabilities);
if (!surfaceProtectedCapabilities.supportsProtected) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name))
return true;
}
}
std::vector<VkSurfaceFormatKHR> surface_formats;
const auto *surface_formats_ref = &surface_formats;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
if (physical_device_state->surface_formats.empty()) {
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count,
&surface_formats[0]);
} else {
surface_formats_ref = &physical_device_state->surface_formats;
}
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool foundFormat = false;
bool foundColorSpace = false;
bool foundMatch = false;
for (auto const &format : *surface_formats_ref) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
foundFormat = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundMatch = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundColorSpace = true;
}
}
}
if (!foundMatch) {
if (!foundFormat) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
pCreateInfo->imageFormat))
return true;
}
if (!foundColorSpace) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
pCreateInfo->imageColorSpace))
return true;
}
}
}
std::vector<VkPresentModeKHR> present_modes;
const auto *present_modes_ref = &present_modes;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
if (physical_device_state->present_modes.empty()) {
uint32_t present_mode_count = 0;
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, nullptr);
present_modes.resize(present_mode_count);
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, &present_modes[0]);
} else {
present_modes_ref = &physical_device_state->present_modes;
}
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool foundMatch =
std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end();
if (!foundMatch) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!device_extensions.vk_khr_shared_presentable_image) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
} else if (pCreateInfo->minImageCount != 1) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
return true;
}
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
if (!device_extensions.vk_khr_swapchain_mutable_format) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_DrawState_ExtensionNotEnabled,
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
"VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
func_name))
return true;
} else {
const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext);
if (image_format_list == nullptr) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
"pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.",
func_name))
return true;
} else if (image_format_list->viewFormatCount == 0) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
"member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.",
func_name))
return true;
} else {
bool found_base_format = false;
for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
found_base_format = true;
break;
}
}
if (!found_base_format) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
"elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match "
"pCreateInfo->imageFormat.",
func_name))
return true;
}
}
}
}
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
bool skip1 =
            ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, func_name,
"pCreateInfo->pQueueFamilyIndices", "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428",
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", false);
if (skip1) return true;
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
const auto surface_state = GetSurfaceState(pCreateInfo->surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (swapchain) {
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
auto image_sub = imageSubresourceMap.find(swapchain_image.image);
if (image_sub != imageSubresourceMap.end()) {
for (auto imgsubpair : image_sub->second) {
auto image_item = imageLayoutMap.find(imgsubpair);
if (image_item != imageLayoutMap.end()) {
imageLayoutMap.erase(image_item);
}
}
imageSubresourceMap.erase(image_sub);
}
EraseQFOImageRelaseBarriers(swapchain_image.image);
}
}
}
StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) const {
auto swapchain_state = GetSwapchainState(swapchain);
bool skip = false;
if (swapchain_state && pSwapchainImages) {
if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_Swapchain_InvalidCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
"value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
*pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
}
return skip;
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages, VkResult result) {
// Usually we'd call the StateTracker first, but
// a) none of the new state needed below is from the StateTracker
// b) StateTracker *will* update swapchain_state->images which we use to guard against double initialization
// so we'll do it in the opposite order -- CoreChecks then StateTracker.
//
// Note, this will get trickier if we start storing image shared pointers in the image layout data, at which point
// we'll have to reverse the order *back* and find some other scheme to prevent double initialization.
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        // Initialize image layout tracking data
auto swapchain_state = GetSwapchainState(swapchain);
const auto image_vector_size = swapchain_state->images.size();
IMAGE_LAYOUT_STATE image_layout_node;
image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
image_layout_node.format = swapchain_state->createInfo.imageFormat;
for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
            // This check makes sure that we don't have an image initialized for this swapchain index, but
            // given that it's StateTracker that stores this information, we need to protect against non-existent entries in the vector
if ((i < image_vector_size) && (swapchain_state->images[i].image != VK_NULL_HANDLE)) continue;
ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
imageLayoutMap[subpair] = image_layout_node;
}
}
// Now call the base class
StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
}
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
bool skip = false;
const auto queue_state = GetQueueState(queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
const auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_BINARY_KHR) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(pPresentInfo->pWaitSemaphores[i]),
"VUID-vkQueuePresentKHR-pWaitSemaphores-03267", // VUID-VkPresentInfoKHR-pWaitSemaphores-03269 could fit also!!
"VkQueuePresent: %s is not a VK_SEMAPHORE_TYPE_BINARY_KHR",
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
if (pSemaphore && !pSemaphore->signaled) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_QueueForwardProgress, "%s is waiting on %s that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage,
"vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
} else {
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]].image;
const auto image_state = GetImageState(image);
if (!image_state->acquired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired,
"vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
pPresentInfo->pImageIndices[i]);
}
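                // Every recorded layout of the presented image must be PRESENT_SRC_KHR (or SHARED_PRESENT_KHR when the
                // shared presentable image extension is enabled)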
vector<VkImageLayout> layouts;
if (FindLayouts(image, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296",
"Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
string_VkImageLayout(layout));
}
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
} else if (!support_it->second) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
}
}
}
}
if (pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
}
if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
"(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchains) const {
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
}
}
return skip;
}
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain,
uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
const char *func_name, const char *semaphore_type_vuid) const {
bool skip = false;
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_BINARY_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY_KHR", func_name,
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
"%s: Semaphore must not be currently signaled or in a wait state.", func_name);
}
auto pFence = GetFenceState(fence);
if (pFence) {
skip |= ValidateFenceForSubmit(pFence);
}
const auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
if (swapchain_data->retired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
"%s: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.",
func_name);
}
auto physical_device_state = GetPhysicalDeviceState();
// TODO: this is technically wrong on many levels, but requires massive cleanup
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) {
const uint32_t acquired_images =
static_cast<uint32_t>(std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[=](SWAPCHAIN_IMAGE image) { return GetImageState(image.image)->acquired; }));
const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size());
const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount;
const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count;
if (timeout == UINT64_MAX && too_many_already_acquired) {
const char *vuid = "INVALID-vuid";
if (cmd_version == CMD_VERSION_1)
vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802";
else if (cmd_version == CMD_VERSION_2)
vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803";
else
assert(false);
const uint32_t acquirable = swapchain_image_count - min_image_count + 1;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), vuid,
"%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32
" %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32
", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").",
func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable,
acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count);
}
}
if (swapchain_data->images.size() == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
"%s: No images found to acquire from. Application probably did not call "
"vkGetSwapchainImagesKHR after swapchain creation.",
func_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const {
return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265");
}
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) const {
bool skip = false;
skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain),
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain), "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR",
"VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
return skip;
}
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) const {
const auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) const {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) const {
bool skip = false;
const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
const VulkanTypedHandle ds_typed(pCreateInfo->descriptorSetLayout, kVulkanObjectTypeDescriptorSetLayout);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, ds_typed.handle,
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(ds_typed).c_str());
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
if (!valid_bp) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pl_typed).c_str());
} else {
const uint32_t pd_set = pCreateInfo->set;
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
func_name, pd_set, report_data->FormatHandle(pl_typed).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
return skip;
}
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) const {
bool skip = false;
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
// Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
// but retaining the assert as template support is new enough to want to investigate these in debug builds.
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set,
const void *pData) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);
const auto layout_data = GetPipelineLayout(layout);
const auto dsl = GetDslFromPipelineLayout(layout_data, set);
const VulkanTypedHandle layout_typed(layout, kVulkanObjectTypePipelineLayout);
// Validate the set index points to a push descriptor set and is in range
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
report_data->FormatHandle(layout_typed).c_str());
}
} else if (layout_data && (set >= layout_data->set_layouts.size())) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout_typed).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
}
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
const auto &template_ci = template_state->create_info;
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_TemplateType,
"%s: descriptorUpdateTemplate %s was not created with flag "
"VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
}
if (template_ci.set != set) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
"%s: descriptorUpdateTemplate %s created with set %" PRIu32
" does not match command parameter set %" PRIu32 ".",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
}
if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
"%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
"%s for set %" PRIu32,
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
report_data->FormatHandle(layout).c_str(), set);
}
}
if (dsl && template_state) {
// Create an empty proxy in order to use the existing descriptor set update validation
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, nullptr, report_data);
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
// Validate the decoded update against the proxy_ds
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data(), func_name);
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) const {
bool skip = false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex,
VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
"vkGetDisplayPlaneCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) const {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, query, index);
const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
bool skip = ValidateBeginQuery(
cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool",
"VUID-vkCmdBeginQueryIndexedEXT-queryType-02338", "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803",
"VUID-vkCmdBeginQueryIndexedEXT-queryType-00800", "VUID-vkCmdBeginQueryIndexedEXT-query-00802");
// Extension specific VU's
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (device_extensions.vk_ext_transform_feedback &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
"%s: index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
"%s: index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
cmd_name, index, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
if (disabled.query_validation) return;
QueryObject query_obj = {queryPool, query, index};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()");
}
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) const {
if (disabled.query_validation) return false;
QueryObject query_obj = {queryPool, query, index};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()",
"VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdEndQueryIndexedEXT-None-02342");
}
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount,
const VkRect2D *pDiscardRectangles) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
const VkSamplerYcbcrConversionCreateInfo *create_info) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateSamplerYcbcrConversionANDROID(create_info);
} else { // Not android hardware buffer
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01649",
"%s: CreateInfo format type is VK_FORMAT_UNDEFINED.", func_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) const {
bool skip = false;
if (!enabled_features.buffer_address.bufferDeviceAddress) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressKHR-bufferDeviceAddress-03324",
"The bufferDeviceAddress feature must: be enabled.");
}
if (physical_device_count > 1 && !enabled_features.buffer_address.bufferDeviceAddressMultiDevice) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressKHR-device-03325",
"If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.");
}
const auto buffer_state = GetBufferState(pInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkGetBufferDeviceAddressEXT()",
"VUID-VkBufferDeviceAddressInfoKHR-buffer-02600");
}
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR, true,
"VUID-VkBufferDeviceAddressInfoKHR-buffer-02601", "vkGetBufferDeviceAddressEXT()",
"VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT");
}
return skip;
}
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange) const {
bool skip = false;
if (firstQuery >= totalCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
vuid_badfirst, "firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s",
firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str());
}
if ((firstQuery + queryCount) > totalCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
vuid_badrange, "Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s",
firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
if (disabled.query_validation) return false;
bool skip = false;
if (!enabled_features.host_query_reset_features.hostQueryReset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkResetQueryPoolEXT-None-02665", "Host query reset not enabled for device");
}
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
"VUID-vkResetQueryPoolEXT-firstQuery-02666", "VUID-vkResetQueryPoolEXT-firstQuery-02667");
}
return skip;
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete CastFromHandle<ValidationCache *>(validationCache);
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t inSize = *pDataSize;
CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
bool skip = false;
auto dst = CastFromHandle<ValidationCache *>(dstCache);
VkResult result = VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) {
auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
if (src == dst) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT, 0,
"VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src);
}
}
return result;
}
bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00108");
skip |= ValidateDeviceMaskToZero(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-vkCmdSetDeviceMask-deviceMask-00109");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00110");
if (cb_state->activeRenderPass) {
skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00111");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMask()");
}
bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMaskKHR()");
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
bool skip = false;
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-vkGetSemaphoreCounterValueKHR-semaphore-03255",
"vkGetSemaphoreCounterValueKHR: semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE_KHR type",
report_data->FormatHandle(semaphore).c_str());
}
return skip;
}
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
if (flags & VK_QUERY_RESULT_64_BIT) {
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_64,
"stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value);
}
} else {
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_not_64,
"stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size) const {
bool skip = false;
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (stride < struct_size)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride,
struct_name, struct_size);
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
bool skip = false;
uint64_t validation_value = stride * (drawCount - 1) + offset + struct_size;
if (validation_value > buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid,
"stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
" is greater than the size[%" PRIx64 "] of %s.",
stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
void PIPELINE_STATE::initGraphicsPipeline(const ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
std::shared_ptr<const RENDER_PASS_STATE> &&rpstate) {
reset();
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];
for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
uses_color_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uses_depthstencil_attachment = true;
}
}
graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
if (graphicsPipelineCI.pInputAssemblyState) {
topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
}
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
this->active_shaders |= pPSSCI->stage;
state_data->RecordPipelineShaderStage(pPSSCI, this, &stage_state[i]);
}
if (graphicsPipelineCI.pVertexInputState) {
const auto pVICI = graphicsPipelineCI.pVertexInputState;
if (pVICI->vertexBindingDescriptionCount) {
this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
this->vertex_binding_to_index_map_.reserve(pVICI->vertexBindingDescriptionCount);
for (uint32_t i = 0; i < pVICI->vertexBindingDescriptionCount; ++i) {
this->vertex_binding_to_index_map_[pVICI->pVertexBindingDescriptions[i].binding] = i;
}
}
if (pVICI->vertexAttributeDescriptionCount) {
this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
for (uint32_t i = 0; i < pVICI->vertexAttributeDescriptionCount; ++i) {
const auto attribute_format = pVICI->pVertexAttributeDescriptions[i].format;
VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format);
if (FormatElementIsTexel(attribute_format)) {
vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format));
}
this->vertex_attribute_alignments_.push_back(vtx_attrib_req_alignment);
}
}
}
if (graphicsPipelineCI.pColorBlendState) {
const auto pCBCI = graphicsPipelineCI.pColorBlendState;
if (pCBCI->attachmentCount) {
this->attachments =
std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
}
}
rp_state = rpstate;
}
void PIPELINE_STATE::initComputePipeline(const ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) {
reset();
computePipelineCI.initialize(pCreateInfo);
switch (computePipelineCI.stage.stage) {
case VK_SHADER_STAGE_COMPUTE_BIT:
this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
stage_state.resize(1);
state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]);
break;
default:
// TODO : Flag error
break;
}
}
void PIPELINE_STATE::initRayTracingPipelineNV(const ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoNV *pCreateInfo) {
reset();
raytracingPipelineCI.initialize(pCreateInfo);
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) {
const auto &shader_stage = pCreateInfo->pStages[stage_index];
switch (shader_stage.stage) {
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
break;
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_MISS_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
break;
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
break;
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
break;
default:
// TODO : Flag error
break;
}
state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]);
}
}
| 1 | 12,273 | The VUIDs in this area are not great, but I think `ValidateQueryPoolStride` should probably be skipped if the query pool was created with type `VK_QUERY_TYPE_PERFORMANCE_QUERY`. VUID-02828 might be a better fit, but again, the existing VUIDs step on each other so it requires a bit of interpretation. | KhronosGroup-Vulkan-ValidationLayers | cpp |
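The review comment attached to this record suggests skipping the stride check when the query pool was created with a performance query type. Below is an illustrative, hedged sketch of one way a call site could do that; the wrapper name and its placement are assumptions, not the actual patch. Only ValidateQueryPoolStride, GetQueryPoolState and the VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR token (added by VK_KHR_performance_query) are taken as given from the surrounding code.

// Sketch only, not the committed change: route stride validation through a guard so
// performance query pools are exempt, as the review comment proposes. The helper name
// is hypothetical and a matching declaration would be needed in the class header.
bool CoreChecks::ValidateQueryPoolStrideIfApplicable(const std::string &vuid_not_64, const std::string &vuid_64,
                                                     VkQueryPool queryPool, const VkDeviceSize stride,
                                                     const char *parameter_name, const uint64_t parameter_value,
                                                     const VkQueryResultFlags flags) const {
    const auto *query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state && query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        // Stride/flags layout rules do not apply to performance query pools, so skip the check.
        return false;
    }
    return ValidateQueryPoolStride(vuid_not_64, vuid_64, stride, parameter_name, parameter_value, flags);
}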
@@ -74,11 +74,12 @@ namespace Nethermind.Store
db[key.ToBigEndianByteArrayWithoutLeadingZeros()] = value;
}
- public static byte[] Get(this IDb db, long key)
- {
- return db[key.ToBigEndianByteArrayWithoutLeadingZeros()];
- }
-
+ public static byte[] Get(this IDb db, long key) => db[key.ToBigEndianByteArrayWithoutLeadingZeros()];
+
+ public static byte[] ToDbKey(this long key) => key.ToBigEndianByteArrayWithoutLeadingZeros();
+
+ public static byte[] ToDbKey(this Keccak key) => key.Bytes;
+
public static void Delete(this IDb db, long key)
{
db.Remove(key.ToBigEndianByteArrayWithoutLeadingZeros()); | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Numerics;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
namespace Nethermind.Store
{
public static class DbExtensions
{
public static void Set(this IDb db, Keccak key, byte[] value)
{
db[key.Bytes] = value;
}
public static byte[] Get(this IDb db, Keccak key)
{
#if DEBUG
if (key == Keccak.OfAnEmptyString)
{
throw new InvalidOperationException();
}
#endif
return db[key.Bytes];
}
public static Span<byte> GetSpan(this IDbWithSpan db, Keccak key)
{
#if DEBUG
if (key == Keccak.OfAnEmptyString)
{
throw new InvalidOperationException();
}
#endif
return db.GetSpan(key.Bytes);
}
public static bool KeyExists(this IDb db, Keccak key)
{
#if DEBUG
if (key == Keccak.OfAnEmptyString)
{
throw new InvalidOperationException();
}
#endif
return db.KeyExists(key.Bytes);
}
public static void Delete(this IDb db, Keccak key)
{
db.Remove(key.Bytes);
}
public static void Set(this IDb db, long key, byte[] value)
{
db[key.ToBigEndianByteArrayWithoutLeadingZeros()] = value;
}
public static byte[] Get(this IDb db, long key)
{
return db[key.ToBigEndianByteArrayWithoutLeadingZeros()];
}
public static void Delete(this IDb db, long key)
{
db.Remove(key.ToBigEndianByteArrayWithoutLeadingZeros());
}
}
} | 1 | 23,311 | Iguess you should use ToDbKey here | NethermindEth-nethermind | .cs |
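A minimal hedged sketch of the review suggestion for this record: once the ToDbKey helpers from the diff exist, the remaining long-keyed extension methods can route through them so the key encoding is defined in exactly one place. This is illustrative only, not the committed change.

// Sketch: reuse ToDbKey() instead of calling ToBigEndianByteArrayWithoutLeadingZeros() directly.
public static void Set(this IDb db, long key, byte[] value) => db[key.ToDbKey()] = value;
public static byte[] Get(this IDb db, long key) => db[key.ToDbKey()];
public static void Delete(this IDb db, long key) => db.Remove(key.ToDbKey());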
@@ -179,16 +179,7 @@ func (x *blockIndexer) DeleteTipBlock(blk *block.Block) error {
func (x *blockIndexer) Height() (uint64, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
-
- index, err := db.GetCountingIndex(x.kvStore, totalBlocksBucket)
- if err != nil {
- if errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist {
- // counting index does not exist yet
- return 0, nil
- }
- return 0, err
- }
- return index.Size() - 1, nil
+ return x.tbk.Size() - 1, nil
}
// GetBlockHash returns the block hash by height | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockindex
import (
"bytes"
"context"
"math/big"
"sync"
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/db/batch"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// the NS/bucket name here are used in index.db, which is separate from chain.db
// still we use 2-byte NS/bucket name here, to clearly differentiate from those (3-byte) in BlockDAO
const (
// first 12-byte of hash is cut off, only last 20-byte is written to DB to reduce storage
hashOffset = 12
blockHashToHeightNS = "hh"
actionToBlockHashNS = "ab"
)
var (
totalBlocksBucket = []byte("bk")
totalActionsBucket = []byte("ac")
// ErrActionIndexNA indicates action index is not supported
ErrActionIndexNA = errors.New("action index not supported")
)
type (
addrIndex map[hash.Hash160]db.CountingIndex
// Indexer is the interface for block indexer
Indexer interface {
Start(context.Context) error
Stop(context.Context) error
PutBlock(context.Context, *block.Block) error
PutBlocks([]*block.Block) error
DeleteTipBlock(*block.Block) error
Height() (uint64, error)
GetBlockHash(height uint64) (hash.Hash256, error)
GetBlockHeight(hash hash.Hash256) (uint64, error)
GetBlockIndex(uint64) (*blockIndex, error)
GetActionIndex([]byte) (*actionIndex, error)
GetTotalActions() (uint64, error)
GetActionHashFromIndex(uint64, uint64) ([][]byte, error)
GetActionCountByAddress(hash.Hash160) (uint64, error)
GetActionsByAddress(hash.Hash160, uint64, uint64) ([][]byte, error)
}
// blockIndexer implements the Indexer interface
blockIndexer struct {
mutex sync.RWMutex
genesisHash hash.Hash256
kvStore db.KVStoreWithRange
batch batch.KVStoreBatch
dirtyAddr addrIndex
tbk db.CountingIndex
tac db.CountingIndex
}
)
// NewIndexer creates a new indexer
func NewIndexer(kv db.KVStore, genesisHash hash.Hash256) (Indexer, error) {
if kv == nil {
return nil, errors.New("empty kvStore")
}
kvRange, ok := kv.(db.KVStoreWithRange)
if !ok {
return nil, errors.New("indexer can only be created from KVStoreWithRange")
}
x := blockIndexer{
kvStore: kvRange,
batch: batch.NewBatch(),
dirtyAddr: make(addrIndex),
genesisHash: genesisHash,
}
return &x, nil
}
// Start starts the indexer
func (x *blockIndexer) Start(ctx context.Context) error {
if err := x.kvStore.Start(ctx); err != nil {
return err
}
// create the total block and action index
var err error
if x.tbk, err = db.NewCountingIndexNX(x.kvStore, totalBlocksBucket); err != nil {
return err
}
if x.tbk.Size() == 0 {
// insert genesis block
if err = x.tbk.Add((&blockIndex{
x.genesisHash[:],
0,
big.NewInt(0)}).Serialize(), false); err != nil {
return err
}
}
x.tac, err = db.NewCountingIndexNX(x.kvStore, totalActionsBucket)
return err
}
// Stop stops the indexer
func (x *blockIndexer) Stop(ctx context.Context) error {
return x.kvStore.Stop(ctx)
}
// PutBlocks writes the batch to DB
func (x *blockIndexer) PutBlocks(blks []*block.Block) error {
x.mutex.Lock()
defer x.mutex.Unlock()
for _, blk := range blks {
if err := x.putBlock(blk); err != nil {
// TODO: Revert changes
return err
}
}
return x.commit()
}
// PutBlock index the block
func (x *blockIndexer) PutBlock(_ context.Context, blk *block.Block) error {
x.mutex.Lock()
defer x.mutex.Unlock()
if err := x.putBlock(blk); err != nil {
return err
}
return x.commit()
}
// DeleteBlock deletes a block's index
func (x *blockIndexer) DeleteTipBlock(blk *block.Block) error {
x.mutex.Lock()
defer x.mutex.Unlock()
// the block to be deleted must be exactly current top, otherwise counting index would not work correctly
height := blk.Height()
if height != x.tbk.Size()-1 {
return errors.Wrapf(db.ErrInvalid, "wrong block height %d, expecting %d", height, x.tbk.Size()-1)
}
// delete hash --> height
hash := blk.HashBlock()
x.batch.Delete(blockHashToHeightNS, hash[hashOffset:], "failed to delete block at height %d", height)
// delete from total block index
if err := x.tbk.Revert(1); err != nil {
return err
}
// delete action index
for _, selp := range blk.Actions {
actHash := selp.Hash()
x.batch.Delete(actionToBlockHashNS, actHash[hashOffset:], "failed to delete action hash %x", actHash)
if err := x.indexAction(actHash, selp, false); err != nil {
return err
}
}
// delete from total action index
if err := x.tac.Revert(uint64(len(blk.Actions))); err != nil {
return err
}
return x.commit()
}
// Height return the blockchain height
func (x *blockIndexer) Height() (uint64, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
index, err := db.GetCountingIndex(x.kvStore, totalBlocksBucket)
if err != nil {
if errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist {
// counting index does not exist yet
return 0, nil
}
return 0, err
}
return index.Size() - 1, nil
}
// GetBlockHash returns the block hash by height
func (x *blockIndexer) GetBlockHash(height uint64) (hash.Hash256, error) {
index, err := x.GetBlockIndex(height)
if err != nil {
return hash.ZeroHash256, errors.Wrap(err, "failed to get block hash")
}
return hash.BytesToHash256(index.Hash()), nil
}
// GetBlockHeight returns the block height by hash
func (x *blockIndexer) GetBlockHeight(hash hash.Hash256) (uint64, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
value, err := x.kvStore.Get(blockHashToHeightNS, hash[hashOffset:])
if err != nil {
return 0, errors.Wrap(err, "failed to get block height")
}
if len(value) == 0 {
return 0, errors.Wrapf(db.ErrNotExist, "height missing for block with hash = %x", hash)
}
return byteutil.BytesToUint64BigEndian(value), nil
}
// GetBlockIndex return the index of block
func (x *blockIndexer) GetBlockIndex(height uint64) (*blockIndex, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
v, err := x.tbk.Get(height)
if err != nil {
return nil, err
}
b := &blockIndex{}
if err := b.Deserialize(v); err != nil {
return nil, err
}
return b, nil
}
// GetActionIndex return the index of action
func (x *blockIndexer) GetActionIndex(h []byte) (*actionIndex, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
v, err := x.kvStore.Get(actionToBlockHashNS, h[hashOffset:])
if err != nil {
return nil, err
}
a := &actionIndex{}
if err := a.Deserialize(v); err != nil {
return nil, err
}
return a, nil
}
// GetTotalActions return total number of all actions
func (x *blockIndexer) GetTotalActions() (uint64, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
total, err := db.GetCountingIndex(x.kvStore, totalActionsBucket)
if err != nil {
return 0, err
}
return total.Size(), nil
}
// GetActionHashFromIndex return hash of actions[start, start+count)
func (x *blockIndexer) GetActionHashFromIndex(start, count uint64) ([][]byte, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
return x.tac.Range(start, count)
}
// GetActionCountByAddress return total number of actions of an address
func (x *blockIndexer) GetActionCountByAddress(addrBytes hash.Hash160) (uint64, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
addr, err := db.GetCountingIndex(x.kvStore, addrBytes[:])
if err != nil {
if errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist {
return 0, nil
}
return 0, err
}
return addr.Size(), nil
}
// GetActionsByAddress return hash of an address's actions[start, start+count)
func (x *blockIndexer) GetActionsByAddress(addrBytes hash.Hash160, start, count uint64) ([][]byte, error) {
x.mutex.RLock()
defer x.mutex.RUnlock()
addr, err := db.GetCountingIndex(x.kvStore, addrBytes[:])
if err != nil {
return nil, err
}
total := addr.Size()
if start >= total {
return nil, errors.Wrapf(db.ErrInvalid, "start = %d >= total = %d", start, total)
}
if start+count > total {
count = total - start
}
return addr.Range(start, count)
}
func (x *blockIndexer) putBlock(blk *block.Block) error {
// the block to be indexed must be exactly current top + 1, otherwise counting index would not work correctly
height := blk.Height()
if height != x.tbk.Size() {
return errors.Wrapf(db.ErrInvalid, "wrong block height %d, expecting %d", height, x.tbk.Size())
}
// index hash --> height
hash := blk.HashBlock()
x.batch.Put(blockHashToHeightNS, hash[hashOffset:], byteutil.Uint64ToBytesBigEndian(height), "failed to put hash -> height mapping")
// index height --> block hash, number of actions, and total transfer amount
bd := &blockIndex{
hash: hash[:],
numAction: uint32(len(blk.Actions)),
tsfAmount: blk.CalculateTransferAmount()}
if err := x.tbk.Add(bd.Serialize(), true); err != nil {
return errors.Wrapf(err, "failed to put block %d index", height)
}
// store height of the block, so getReceiptByActionHash() can use height to directly pull receipts
ad := (&actionIndex{
blkHeight: blk.Height()}).Serialize()
// index actions in the block
for _, selp := range blk.Actions {
actHash := selp.Hash()
x.batch.Put(actionToBlockHashNS, actHash[hashOffset:], ad, "failed to put action hash %x", actHash)
// add to total account index
if err := x.tac.Add(actHash[:], true); err != nil {
return err
}
if err := x.indexAction(actHash, selp, true); err != nil {
return err
}
}
return nil
}
// commit writes the changes
func (x *blockIndexer) commit() error {
var commitErr error
for k, v := range x.dirtyAddr {
if commitErr == nil {
if err := v.Commit(); err != nil {
commitErr = err
}
}
delete(x.dirtyAddr, k)
}
if commitErr != nil {
return commitErr
}
// total block and total action index
if err := x.tbk.Commit(); err != nil {
return err
}
if err := x.tac.Commit(); err != nil {
return err
}
return x.kvStore.WriteBatch(x.batch)
}
// getIndexerForAddr returns the counting indexer for an address
// if batch is true, the indexer will be placed into a dirty map, to be committed later
func (x *blockIndexer) getIndexerForAddr(addr []byte, batch bool) (db.CountingIndex, error) {
if !batch {
return db.NewCountingIndexNX(x.kvStore, addr)
}
address := hash.BytesToHash160(addr)
indexer, ok := x.dirtyAddr[address]
if !ok {
// create indexer for addr if not exist
var err error
indexer, err = db.NewCountingIndexNX(x.kvStore, addr)
if err != nil {
return nil, err
}
x.dirtyAddr[address] = indexer
}
return indexer, nil
}
// indexAction builds index for an action
func (x *blockIndexer) indexAction(actHash hash.Hash256, elp action.SealedEnvelope, insert bool) error {
// add to sender's index
callerAddrBytes := elp.SrcPubkey().Hash()
sender, err := x.getIndexerForAddr(callerAddrBytes, insert)
if err != nil {
return err
}
if insert {
err = sender.Add(actHash[:], insert)
} else {
err = sender.Revert(1)
}
if err != nil {
return err
}
dst, ok := elp.Destination()
if !ok || dst == "" {
return nil
}
dstAddr, err := address.FromString(dst)
if err != nil {
return err
}
dstAddrBytes := dstAddr.Bytes()
if bytes.Compare(dstAddrBytes, callerAddrBytes) == 0 {
// recipient is same as sender
return nil
}
// add to recipient's index
recipient, err := x.getIndexerForAddr(dstAddrBytes, insert)
if err != nil {
return err
}
if insert {
err = recipient.Add(actHash[:], insert)
} else {
err = recipient.Revert(1)
}
return err
}
| 1 | 21,652 | x.tbk is the "index" below, and is done in Start() | iotexproject-iotex-core | go |
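For context on the review comment for this record: x.tbk is created by db.NewCountingIndexNX in Start() and seeded with the genesis entry, so Height() can read it directly instead of re-opening the bucket with db.GetCountingIndex on every call. A hedged restatement of the patched method with that invariant spelled out in comments:

// Sketch matching the diff above; the comments make the Start()-time invariant explicit.
func (x *blockIndexer) Height() (uint64, error) {
	x.mutex.RLock()
	defer x.mutex.RUnlock()
	// x.tbk is non-nil after a successful Start() and already holds the genesis block
	// at index 0, so Size() >= 1 and Size()-1 is the current tip height.
	return x.tbk.Size() - 1, nil
}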
@@ -8,6 +8,7 @@ namespace Datadog.Trace.Util
/// </summary>
internal static class DomainMetadata
{
+ private const string IsAppInsightKey = "DD_IsAppInsight";
private const string UnknownName = "unknown";
private static Process _currentProcess;
private static bool _processDataPoisoned; | 1 | using System;
using System.Diagnostics;
namespace Datadog.Trace.Util
{
/// <summary>
/// Dedicated helper class for consistently referencing Process and AppDomain information.
/// </summary>
internal static class DomainMetadata
{
private const string UnknownName = "unknown";
private static Process _currentProcess;
private static bool _processDataPoisoned;
private static bool _domainDataPoisoned;
static DomainMetadata()
{
TrySetProcess();
}
public static string ProcessName
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.ProcessName : UnknownName;
}
catch
{
_processDataPoisoned = true;
return UnknownName;
}
}
}
public static string MachineName
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.MachineName : UnknownName;
}
catch
{
_processDataPoisoned = true;
return UnknownName;
}
}
}
public static int ProcessId
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.Id : -1;
}
catch
{
_processDataPoisoned = true;
return -1;
}
}
}
public static string AppDomainName
{
get
{
try
{
return !_domainDataPoisoned ? AppDomain.CurrentDomain.FriendlyName : UnknownName;
}
catch
{
_domainDataPoisoned = true;
return UnknownName;
}
}
}
public static int AppDomainId
{
get
{
try
{
return !_domainDataPoisoned ? AppDomain.CurrentDomain.Id : -1;
}
catch
{
_domainDataPoisoned = true;
return -1;
}
}
}
public static bool ShouldAvoidAppDomain()
{
if (AppDomainName.IndexOf("ApplicationInsights", StringComparison.OrdinalIgnoreCase) >= 0)
{
return true;
}
return false;
}
private static void TrySetProcess()
{
try
{
if (!_processDataPoisoned && _currentProcess == null)
{
_currentProcess = Process.GetCurrentProcess();
}
}
catch
{
_processDataPoisoned = true;
}
}
}
}
| 1 | 17,184 | Do we already have a convention for this? If not, would we consider "DataDog.IsAppInsights". And then use "DataDog." prefix for all this settings, environment variables etc..? Such settings are, essentially, public APIs because they may conflict with customer data. Regardless of that , AppInsights has an s at the end :) | DataDog-dd-trace-dotnet | .cs |
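A tiny hedged sketch of the naming floated in the review comment for this record; the prefix and casing are open questions raised by the comment itself, so this is purely illustrative.

// Hypothetical naming: a shared "Datadog." prefix for such keys, with "AppInsights"
// spelled with the trailing "s". Not an agreed convention.
private const string IsAppInsightsKey = "Datadog.IsAppInsights";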
@@ -95,6 +95,16 @@ static int cb_firehose_init(struct flb_output_instance *ins,
ctx->time_key_format = DEFAULT_TIME_KEY_FORMAT;
}
+ tmp = flb_output_get_property("log_key", ins);
+ if (tmp) {
+ ctx->log_key = tmp;
+ }
+
+ if (ctx->log_key && ctx->time_key) {
+ flb_plg_error(ctx->ins, "'time_key' and 'log_key' can not be used together");
+ goto error;
+ }
+
tmp = flb_output_get_property("endpoint", ins);
if (tmp) {
ctx->custom_endpoint = FLB_TRUE; | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_compat.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_output.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_slist.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_config_map.h>
#include <fluent-bit/flb_output_plugin.h>
#include <fluent-bit/flb_sds.h>
#include <fluent-bit/flb_aws_credentials.h>
#include <fluent-bit/flb_aws_util.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_utils.h>
#include <monkey/mk_core.h>
#include <msgpack.h>
#include <string.h>
#include <stdio.h>
#include "firehose.h"
#include "firehose_api.h"
static struct flb_aws_header content_type_header = {
.key = "Content-Type",
.key_len = 12,
.val = "application/x-amz-json-1.1",
.val_len = 26,
};
static int cb_firehose_init(struct flb_output_instance *ins,
struct flb_config *config, void *data)
{
const char *tmp;
char *session_name = NULL;
struct flb_firehose *ctx = NULL;
int ret;
(void) config;
(void) data;
ctx = flb_calloc(1, sizeof(struct flb_firehose));
if (!ctx) {
flb_errno();
return -1;
}
ctx->ins = ins;
/* Populate context with config map defaults and incoming properties */
ret = flb_output_config_map_set(ins, (void *) ctx);
if (ret == -1) {
flb_plg_error(ctx->ins, "configuration error");
goto error;
}
tmp = flb_output_get_property("delivery_stream", ins);
if (tmp) {
ctx->delivery_stream = tmp;
} else {
flb_plg_error(ctx->ins, "'delivery_stream' is a required field");
goto error;
}
tmp = flb_output_get_property("time_key", ins);
if (tmp) {
ctx->time_key = tmp;
}
tmp = flb_output_get_property("time_key_format", ins);
if (tmp) {
ctx->time_key_format = tmp;
} else {
ctx->time_key_format = DEFAULT_TIME_KEY_FORMAT;
}
tmp = flb_output_get_property("endpoint", ins);
if (tmp) {
ctx->custom_endpoint = FLB_TRUE;
ctx->endpoint = removeProtocol((char *) tmp, "https://");
}
else {
ctx->custom_endpoint = FLB_FALSE;
}
tmp = flb_output_get_property("sts_endpoint", ins);
if (tmp) {
ctx->sts_endpoint = (char *) tmp;
}
tmp = flb_output_get_property("log_key", ins);
if (tmp) {
ctx->log_key = tmp;
}
tmp = flb_output_get_property("region", ins);
if (tmp) {
ctx->region = tmp;
} else {
flb_plg_error(ctx->ins, "'region' is a required field");
goto error;
}
tmp = flb_output_get_property("role_arn", ins);
if (tmp) {
ctx->role_arn = tmp;
}
/* one tls instance for provider, one for cw client */
ctx->cred_tls.context = flb_tls_context_new(FLB_TRUE,
ins->tls_debug,
ins->tls_vhost,
ins->tls_ca_path,
ins->tls_ca_file,
ins->tls_crt_file,
ins->tls_key_file,
ins->tls_key_passwd);
if (!ctx->cred_tls.context) {
flb_plg_error(ctx->ins, "Failed to create tls context");
goto error;
}
ctx->client_tls.context = flb_tls_context_new(FLB_TRUE,
ins->tls_debug,
ins->tls_vhost,
ins->tls_ca_path,
ins->tls_ca_file,
ins->tls_crt_file,
ins->tls_key_file,
ins->tls_key_passwd);
if (!ctx->client_tls.context) {
flb_plg_error(ctx->ins, "Failed to create tls context");
goto error;
}
ctx->aws_provider = flb_standard_chain_provider_create(config,
&ctx->cred_tls,
(char *) ctx->region,
ctx->sts_endpoint,
NULL,
flb_aws_client_generator());
if (!ctx->aws_provider) {
flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
goto error;
}
if(ctx->role_arn) {
/* set up sts assume role provider */
session_name = flb_sts_session_name();
if (!session_name) {
flb_plg_error(ctx->ins,
"Failed to generate random STS session name");
goto error;
}
/* STS provider needs yet another separate TLS instance */
ctx->sts_tls.context = flb_tls_context_new(FLB_TRUE,
ins->tls_debug,
ins->tls_vhost,
ins->tls_ca_path,
ins->tls_ca_file,
ins->tls_crt_file,
ins->tls_key_file,
ins->tls_key_passwd);
if (!ctx->sts_tls.context) {
flb_errno();
goto error;
}
ctx->base_aws_provider = ctx->aws_provider;
ctx->aws_provider = flb_sts_provider_create(config,
&ctx->sts_tls,
ctx->base_aws_provider,
NULL,
(char *) ctx->role_arn,
session_name,
(char *) ctx->region,
ctx->sts_endpoint,
NULL,
flb_aws_client_generator());
if (!ctx->aws_provider) {
flb_plg_error(ctx->ins,
"Failed to create AWS STS Credential Provider");
goto error;
}
/* session name can freed after provider is created */
flb_free(session_name);
session_name = NULL;
}
/* initialize credentials and set to sync mode */
ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
if (ctx->endpoint == NULL) {
ctx->endpoint = flb_aws_endpoint("firehose", (char *) ctx->region);
if (!ctx->endpoint) {
goto error;
}
}
struct flb_aws_client_generator *generator = flb_aws_client_generator();
ctx->firehose_client = generator->create();
if (!ctx->firehose_client) {
goto error;
}
ctx->firehose_client->name = "firehose_client";
ctx->firehose_client->has_auth = FLB_TRUE;
ctx->firehose_client->provider = ctx->aws_provider;
ctx->firehose_client->region = (char *) ctx->region;
ctx->firehose_client->service = "firehose";
ctx->firehose_client->port = 443;
ctx->firehose_client->flags = 0;
ctx->firehose_client->proxy = NULL;
ctx->firehose_client->static_headers = &content_type_header;
ctx->firehose_client->static_headers_len = 1;
struct flb_upstream *upstream = flb_upstream_create(config, ctx->endpoint,
443, FLB_IO_TLS,
&ctx->client_tls);
if (!upstream) {
flb_plg_error(ctx->ins, "Connection initialization error");
goto error;
}
ctx->firehose_client->upstream = upstream;
ctx->firehose_client->host = ctx->endpoint;
/* Export context */
flb_output_set_context(ins, ctx);
return 0;
error:
flb_free(session_name);
flb_plg_error(ctx->ins, "Initialization failed");
flb_firehose_ctx_destroy(ctx);
return -1;
}
struct flush *new_flush_buffer()
{
struct flush *buf;
buf = flb_calloc(1, sizeof(struct flush));
if (!buf) {
flb_errno();
return NULL;
}
buf->tmp_buf = flb_malloc(sizeof(char) * PUT_RECORD_BATCH_PAYLOAD_SIZE);
if (!buf->tmp_buf) {
flb_errno();
flush_destroy(buf);
return NULL;
}
buf->tmp_buf_size = PUT_RECORD_BATCH_PAYLOAD_SIZE;
buf->events = flb_malloc(sizeof(struct event) * MAX_EVENTS_PER_PUT);
if (!buf->events) {
flb_errno();
flush_destroy(buf);
return NULL;
}
buf->events_capacity = MAX_EVENTS_PER_PUT;
return buf;
}
static void cb_firehose_flush(const void *data, size_t bytes,
const char *tag, int tag_len,
struct flb_input_instance *i_ins,
void *out_context,
struct flb_config *config)
{
struct flb_firehose *ctx = out_context;
int ret;
struct flush *buf;
(void) i_ins;
(void) config;
buf = new_flush_buffer();
if (!buf) {
flb_plg_error(ctx->ins, "Failed to construct flush buffer");
FLB_OUTPUT_RETURN(FLB_RETRY);
}
ret = process_and_send_records(ctx, buf, data, bytes);
if (ret < 0) {
flb_plg_error(ctx->ins, "Failed to send records");
flush_destroy(buf);
FLB_OUTPUT_RETURN(FLB_RETRY);
}
flb_plg_info(ctx->ins, "Processed %d records, sent %d to %s",
buf->records_processed, buf->records_sent, ctx->delivery_stream);
flush_destroy(buf);
FLB_OUTPUT_RETURN(FLB_OK);
}
void flb_firehose_ctx_destroy(struct flb_firehose *ctx)
{
if (ctx != NULL) {
if (ctx->base_aws_provider) {
flb_aws_provider_destroy(ctx->base_aws_provider);
}
if (ctx->aws_provider) {
flb_aws_provider_destroy(ctx->aws_provider);
}
if (ctx->cred_tls.context) {
flb_tls_context_destroy(ctx->cred_tls.context);
}
if (ctx->sts_tls.context) {
flb_tls_context_destroy(ctx->sts_tls.context);
}
if (ctx->client_tls.context) {
flb_tls_context_destroy(ctx->client_tls.context);
}
if (ctx->firehose_client) {
flb_aws_client_destroy(ctx->firehose_client);
}
if (ctx->custom_endpoint == FLB_FALSE) {
flb_free(ctx->endpoint);
}
flb_free(ctx);
}
}
static int cb_firehose_exit(void *data, struct flb_config *config)
{
struct flb_firehose *ctx = data;
flb_firehose_ctx_destroy(ctx);
return 0;
}
/* Configuration properties map */
static struct flb_config_map config_map[] = {
{
FLB_CONFIG_MAP_STR, "region", NULL,
0, FLB_FALSE, 0,
"The AWS region of your delivery stream"
},
{
FLB_CONFIG_MAP_STR, "delivery_stream", NULL,
0, FLB_FALSE, 0,
"Firehose delivery stream name"
},
{
FLB_CONFIG_MAP_STR, "time_key", NULL,
0, FLB_FALSE, 0,
"Add the timestamp to the record under this key. By default the timestamp "
"from Fluent Bit will not be added to records sent to Kinesis."
},
{
FLB_CONFIG_MAP_STR, "time_key_format", NULL,
0, FLB_FALSE, 0,
"strftime compliant format string for the timestamp; for example, "
"the default is '%Y-%m-%dT%H:%M:%S'. This option is used with time_key. "
},
{
FLB_CONFIG_MAP_STR, "role_arn", NULL,
0, FLB_FALSE, 0,
"ARN of an IAM role to assume (ex. for cross account access)."
},
{
FLB_CONFIG_MAP_STR, "endpoint", NULL,
0, FLB_FALSE, 0,
"Specify a custom endpoint for the Firehose API"
},
{
FLB_CONFIG_MAP_STR, "sts_endpoint", NULL,
0, FLB_FALSE, 0,
"Custom endpoint for the STS API."
},
/* EOF */
{0}
};
/* Plugin registration */
struct flb_output_plugin out_kinesis_firehose_plugin = {
.name = "kinesis_firehose",
.description = "Send logs to Amazon Kinesis Firehose",
.cb_init = cb_firehose_init,
.cb_flush = cb_firehose_flush,
.cb_exit = cb_firehose_exit,
.flags = 0,
/* Configuration */
.config_map = config_map,
};
| 1 | 12,909 | this assignment is not necessary if the offsetof() is used in the configmap | fluent-fluent-bit | c |
@@ -268,11 +268,6 @@ func (p *Protocol) claimFromAccount(sm protocol.StateManager, addr address.Addre
balance := big.NewInt(0).Sub(acc.balance, amount)
if balance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough available balance")
- } else if balance.Cmp(big.NewInt(0)) == 0 {
- // If the account balance is cleared, delete if from the store
- if err := p.deleteState(sm, accKey); err != nil {
- return err
- }
} else {
acc.balance = balance
if err := p.putState(sm, accKey, &acc); err != nil { | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rewarding
import (
"context"
"math/big"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rewarding/rewardingpb"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/pkg/enc"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
// rewardHistory is the dummy struct to record a reward. Only key matters.
type rewardHistory struct{}
// Serialize serializes reward history state into bytes
func (b rewardHistory) Serialize() ([]byte, error) {
gen := rewardingpb.RewardHistory{}
return proto.Marshal(&gen)
}
// Deserialize deserializes bytes into reward history state
func (b *rewardHistory) Deserialize(data []byte) error { return nil }
// rewardHistory stores the unclaimed balance of an account
type rewardAccount struct {
balance *big.Int
}
// Serialize serializes account state into bytes
func (a rewardAccount) Serialize() ([]byte, error) {
gen := rewardingpb.Account{
Balance: a.balance.String(),
}
return proto.Marshal(&gen)
}
// Deserialize deserializes bytes into account state
func (a *rewardAccount) Deserialize(data []byte) error {
gen := rewardingpb.Account{}
if err := proto.Unmarshal(data, &gen); err != nil {
return err
}
balance, ok := big.NewInt(0).SetString(gen.Balance, 10)
if !ok {
return errors.New("failed to set reward account balance")
}
a.balance = balance
return nil
}
// GrantBlockReward grants the block reward (token) to the block producer
func (p *Protocol) GrantBlockReward(
ctx context.Context,
sm protocol.StateManager,
) error {
raCtx := protocol.MustGetRunActionsCtx(ctx)
if err := p.assertNoRewardYet(sm, blockRewardHistoryKeyPrefix, raCtx.BlockHeight); err != nil {
return err
}
// Get the reward address for the block producer
epochNum := p.rp.GetEpochNum(raCtx.BlockHeight)
candidates, err := p.cm.CandidatesByHeight(p.rp.GetEpochHeight(epochNum))
if err != nil {
return err
}
producerAddrStr := raCtx.Producer.String()
rewardAddrStr := ""
for _, candidate := range candidates {
if candidate.Address == producerAddrStr {
rewardAddrStr = candidate.RewardAddress
break
}
}
// If reward address doesn't exist, do nothing
if rewardAddrStr == "" {
log.S().Warnf("Producer %s doesn't have a reward address", producerAddrStr)
return nil
}
rewardAddr, err := address.FromString(rewardAddrStr)
a := admin{}
if err := p.state(sm, adminKey, &a); err != nil {
return err
}
if err := p.updateAvailableBalance(sm, a.blockReward); err != nil {
return err
}
if err != nil {
return err
}
if err := p.grantToAccount(sm, rewardAddr, a.blockReward); err != nil {
return err
}
return p.updateRewardHistory(sm, blockRewardHistoryKeyPrefix, raCtx.BlockHeight)
}
// GrantEpochReward grants the epoch reward (token) to all beneficiaries of a epoch
func (p *Protocol) GrantEpochReward(
ctx context.Context,
sm protocol.StateManager,
) error {
raCtx := protocol.MustGetRunActionsCtx(ctx)
epochNum := p.rp.GetEpochNum(raCtx.BlockHeight)
if err := p.assertNoRewardYet(sm, epochRewardHistoryKeyPrefix, epochNum); err != nil {
return err
}
if err := p.assertLastBlockInEpoch(raCtx.BlockHeight, epochNum); err != nil {
return err
}
a := admin{}
if err := p.state(sm, adminKey, &a); err != nil {
return err
}
// We need to consistently use the votes on of first block height in this epoch
candidates, err := p.cm.CandidatesByHeight(p.rp.GetEpochHeight(epochNum))
if err != nil {
return err
}
// Get unqualified delegate list
uqd, err := p.unqualifiedDelegates(raCtx.Producer, epochNum, a.productivityThreshold)
if err != nil {
return err
}
addrs, amounts, err := p.splitEpochReward(sm, candidates, a.epochReward, a.numDelegatesForEpochReward, uqd)
if err != nil {
return err
}
actualTotalReward := big.NewInt(0)
for i := range addrs {
// If reward address doesn't exist, do nothing
if addrs[i] == nil {
continue
}
if err := p.grantToAccount(sm, addrs[i], amounts[i]); err != nil {
return err
}
actualTotalReward = big.NewInt(0).Add(actualTotalReward, amounts[i])
}
// Reward additional bootstrap bonus
if epochNum <= a.foundationBonusLastEpoch {
l := uint64(len(candidates))
if l > a.numDelegatesForFoundationBonus {
l = a.numDelegatesForFoundationBonus
}
for i := uint64(0); i < l; i++ {
// If reward address doesn't exist, do nothing
if candidates[i].RewardAddress == "" {
log.S().Warnf("Candidate %s doesn't have a reward address", candidates[i].Address)
continue
}
rewardAddr, err := address.FromString(candidates[i].RewardAddress)
if err != nil {
return err
}
if err := p.grantToAccount(sm, rewardAddr, a.foundationBonus); err != nil {
return err
}
actualTotalReward = big.NewInt(0).Add(actualTotalReward, a.foundationBonus)
}
}
// Update actual reward
if err := p.updateAvailableBalance(sm, actualTotalReward); err != nil {
return err
}
return p.updateRewardHistory(sm, epochRewardHistoryKeyPrefix, epochNum)
}
// Claim claims the token from the rewarding fund
func (p *Protocol) Claim(
ctx context.Context,
sm protocol.StateManager,
amount *big.Int,
) error {
raCtx := protocol.MustGetRunActionsCtx(ctx)
if err := p.assertAmount(amount); err != nil {
return err
}
if err := p.updateTotalBalance(sm, amount); err != nil {
return err
}
return p.claimFromAccount(sm, raCtx.Caller, amount)
}
// UnclaimedBalance returns unclaimed balance of a given address
func (p *Protocol) UnclaimedBalance(
ctx context.Context,
sm protocol.StateManager,
addr address.Address,
) (*big.Int, error) {
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
err := p.state(sm, accKey, &acc)
if err == nil {
return acc.balance, nil
}
if errors.Cause(err) == state.ErrStateNotExist {
return big.NewInt(0), nil
}
return nil, err
}
func (p *Protocol) updateTotalBalance(sm protocol.StateManager, amount *big.Int) error {
f := fund{}
if err := p.state(sm, fundKey, &f); err != nil {
return err
}
totalBalance := big.NewInt(0).Sub(f.totalBalance, amount)
if totalBalance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough total balance")
}
f.totalBalance = totalBalance
return p.putState(sm, fundKey, &f)
}
func (p *Protocol) updateAvailableBalance(sm protocol.StateManager, amount *big.Int) error {
f := fund{}
if err := p.state(sm, fundKey, &f); err != nil {
return err
}
availableBalance := big.NewInt(0).Sub(f.unclaimedBalance, amount)
if availableBalance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough available balance")
}
f.unclaimedBalance = availableBalance
return p.putState(sm, fundKey, &f)
}
func (p *Protocol) grantToAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error {
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
if err := p.state(sm, accKey, &acc); err != nil {
if errors.Cause(err) != state.ErrStateNotExist {
return err
}
acc = rewardAccount{
balance: big.NewInt(0),
}
}
acc.balance = big.NewInt(0).Add(acc.balance, amount)
return p.putState(sm, accKey, &acc)
}
func (p *Protocol) claimFromAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error {
// Update reward account
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
if err := p.state(sm, accKey, &acc); err != nil {
return err
}
balance := big.NewInt(0).Sub(acc.balance, amount)
if balance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough available balance")
} else if balance.Cmp(big.NewInt(0)) == 0 {
// If the account balance is cleared, delete if from the store
if err := p.deleteState(sm, accKey); err != nil {
return err
}
} else {
acc.balance = balance
if err := p.putState(sm, accKey, &acc); err != nil {
return err
}
}
// Update primary account
primAcc, err := accountutil.LoadOrCreateAccount(sm, addr.String(), big.NewInt(0))
if err != nil {
return err
}
primAcc.Balance = big.NewInt(0).Add(primAcc.Balance, amount)
return accountutil.StoreAccount(sm, addr.String(), primAcc)
}
func (p *Protocol) updateRewardHistory(sm protocol.StateManager, prefix []byte, index uint64) error {
var indexBytes [8]byte
enc.MachineEndian.PutUint64(indexBytes[:], index)
return p.putState(sm, append(prefix, indexBytes[:]...), &rewardHistory{})
}
func (p *Protocol) splitEpochReward(
sm protocol.StateManager,
candidates []*state.Candidate,
totalAmount *big.Int,
numDelegatesForEpochReward uint64,
uqd map[string]interface{},
) ([]address.Address, []*big.Int, error) {
// Remove the candidates who exempt from the epoch reward
e := exempt{}
if err := p.state(sm, exemptKey, &e); err != nil {
return nil, nil, err
}
exemptAddrs := make(map[string]interface{})
for _, addr := range e.addrs {
exemptAddrs[addr.String()] = nil
}
filteredCandidates := make([]*state.Candidate, 0)
for _, candidate := range candidates {
if _, ok := exemptAddrs[candidate.Address]; ok {
continue
}
filteredCandidates = append(filteredCandidates, candidate)
}
candidates = filteredCandidates
if len(candidates) == 0 {
return nil, nil, nil
}
// We at most allow numDelegatesForEpochReward delegates to get the epoch reward
if uint64(len(candidates)) > numDelegatesForEpochReward {
candidates = candidates[:numDelegatesForEpochReward]
}
totalWeight := big.NewInt(0)
rewardAddrs := make([]address.Address, 0)
for _, candidate := range candidates {
var rewardAddr address.Address
var err error
if candidate.RewardAddress != "" {
rewardAddr, err = address.FromString(candidate.RewardAddress)
if err != nil {
return nil, nil, err
}
} else {
log.S().Warnf("Candidate %s doesn't have a reward address", candidate.Address)
}
rewardAddrs = append(rewardAddrs, rewardAddr)
totalWeight = big.NewInt(0).Add(totalWeight, candidate.Votes)
}
amounts := make([]*big.Int, 0)
for _, candidate := range candidates {
// If not qualified, skip the epoch reward
if _, ok := uqd[candidate.Address]; ok {
amounts = append(amounts, big.NewInt(0))
continue
}
var amountPerAddr *big.Int
if totalWeight.Cmp(big.NewInt(0)) == 0 {
amountPerAddr = big.NewInt(0)
} else {
amountPerAddr = big.NewInt(0).Div(big.NewInt(0).Mul(totalAmount, candidate.Votes), totalWeight)
}
amounts = append(amounts, amountPerAddr)
}
return rewardAddrs, amounts, nil
}
func (p *Protocol) unqualifiedDelegates(
producer address.Address,
epochNum uint64,
productivityThreshold uint64,
) (map[string]interface{}, error) {
unqualifiedDelegates := make(map[string]interface{}, 0)
numBlks, produce, err := p.cm.ProductivityByEpoch(epochNum)
if err != nil {
return nil, err
}
// The current block is not included, so that we need to add it to the stats
numBlks++
produce[producer.String()]++
expectedNumBlks := numBlks / uint64(len(produce))
for addr, actualNumBlks := range produce {
if actualNumBlks*100/expectedNumBlks < productivityThreshold {
unqualifiedDelegates[addr] = nil
}
}
return unqualifiedDelegates, nil
}
func (p *Protocol) assertNoRewardYet(sm protocol.StateManager, prefix []byte, index uint64) error {
history := rewardHistory{}
var indexBytes [8]byte
enc.MachineEndian.PutUint64(indexBytes[:], index)
err := p.state(sm, append(prefix, indexBytes[:]...), &history)
if err == nil {
return errors.Errorf("reward history already exists on index %d", index)
}
if errors.Cause(err) != state.ErrStateNotExist {
return err
}
return nil
}
func (p *Protocol) assertLastBlockInEpoch(blkHeight uint64, epochNum uint64) error {
lastBlkHeight := p.rp.GetEpochLastBlockHeight(epochNum)
if blkHeight != lastBlkHeight {
return errors.Errorf("current block %d is not the last block of epoch %d", blkHeight, epochNum)
}
return nil
}
| 1 | 16,748 | I'd rather leave a TODO here at least. | iotexproject-iotex-core | go |
@@ -115,10 +115,10 @@ class BatchViewTest(BaseWebTest, unittest.TestCase):
request = {"path": "/mushrooms/"} # trailing slash
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
- collection = resp.json["responses"][0]
- self.assertEqual(collection["status"], 200)
- self.assertEqual(collection["path"], "/v0/mushrooms")
- self.assertEqual(collection["body"], {"data": []})
+ objects = resp.json["responses"][0]
+ self.assertEqual(objects["status"], 200)
+ self.assertEqual(objects["path"], "/v0/mushrooms")
+ self.assertEqual(objects["body"], {"data": []})
def test_body_is_transmitted_during_redirect(self):
request = { | 1 | import colander
import uuid
import unittest
from unittest import mock
from pyramid.response import Response
from kinto.core.views.batch import BatchPayloadSchema, batch as batch_service
from kinto.core.testing import DummyRequest
from kinto.core.utils import json
from .support import BaseWebTest
class BatchViewTest(BaseWebTest, unittest.TestCase):
def test_does_not_require_authentication(self):
body = {"requests": []}
self.app.post_json("/batch", body)
def test_returns_400_if_body_has_missing_requests(self):
self.app.post("/batch", {}, headers=self.headers, status=400)
def test_returns_responses_if_schema_is_valid(self):
body = {"requests": []}
resp = self.app.post_json("/batch", body, headers=self.headers)
self.assertIn("responses", resp.json)
def test_defaults_are_applied_to_requests(self):
request = {"path": "/v0/"}
defaults = {"method": "POST"}
result = self.app.post_json("/batch", {"requests": [request], "defaults": defaults})
self.assertEqual(result.json["responses"][0]["status"], 405)
def test_only_post_is_allowed(self):
self.app.get("/batch", headers=self.headers, status=405)
self.app.put("/batch", headers=self.headers, status=405)
self.app.patch("/batch", headers=self.headers, status=405)
self.app.delete("/batch", headers=self.headers, status=405)
def test_batch_adds_missing_api_with_prefix(self):
request = {"path": "/v0/"}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
hello = resp.json["responses"][0]
self.assertEqual(hello["path"], "/v0/")
self.assertEqual(hello["status"], 200)
self.assertEqual(hello["body"]["project_name"], "myapp")
self.assertIn("application/json", hello["headers"]["Content-Type"])
def test_empty_response_body_with_head(self):
request = {"path": "/v0/", "method": "HEAD"}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
head = resp.json["responses"][0]
self.assertEqual(head["body"], "")
self.assertNotEqual(len(head["headers"]), 0)
def test_api_errors_are_json_formatted(self):
request = {"path": "/unknown"}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
error = resp.json["responses"][0]
self.assertEqual(error["body"]["code"], 404)
def test_internal_errors_makes_the_batch_fail(self):
request = {"path": "/v0/"}
body = {"requests": [request]}
with mock.patch("kinto.core.views.hello.get_eos") as mocked:
mocked.side_effect = AttributeError
self.app.post_json("/batch", body, headers=self.headers, status=500)
def test_errors_handled_by_view_does_not_make_the_batch_fail(self):
from requests.exceptions import HTTPError
request = {"path": "/v0/"}
body = {"requests": [request]}
with mock.patch("kinto.core.views.hello.get_eos") as mocked:
response = mock.MagicMock(status_code=404)
mocked.side_effect = HTTPError(response=response)
resp = self.app.post_json("/batch", body, headers=self.headers, status=200)
subresponse = resp.json["responses"][0]["body"]
self.assertEqual(subresponse, {"errno": 999, "code": 404, "error": "Not Found"})
def test_batch_cannot_be_recursive(self):
requests = {"requests": [{"path": "/v0/"}]}
request = {"method": "POST", "path": "/v0/batch", "body": requests}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, status=400)
self.assertIn("Recursive", resp.json["message"])
def test_batch_validates_json(self):
body = """{"requests": [{"path": "/v0/"},]}"""
resp = self.app.post(
"/batch", body, status=400, headers={"Content-Type": "application/json"}
)
self.assertIn("Invalid JSON", resp.json["message"])
def test_batch_should_reject_unaccepted_request_content_type(self):
request = {"requests": [{"path": "/v0/mushrooms"}]}
self.app.post("/batch", request, status=415, headers={"Content-Type": "text/plain"})
def test_responses_are_resolved_with_api_with_prefix(self):
request = {"path": "/"}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
hello = resp.json["responses"][0]
self.assertEqual(hello["path"], "/v0/")
self.assertEqual(hello["status"], 200)
self.assertEqual(hello["body"]["project_name"], "myapp")
self.assertIn("application/json", hello["headers"]["Content-Type"])
def test_redirect_responses_are_followed(self):
request = {"path": "/mushrooms/"} # trailing slash
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
collection = resp.json["responses"][0]
self.assertEqual(collection["status"], 200)
self.assertEqual(collection["path"], "/v0/mushrooms")
self.assertEqual(collection["body"], {"data": []})
def test_body_is_transmitted_during_redirect(self):
request = {
"method": "PUT",
"path": "/mushrooms/{}/".format(str(uuid.uuid4())),
"body": {"data": {"name": "Trompette de la mort"}},
}
body = {"requests": [request]}
resp = self.app.post_json("/batch", body, headers=self.headers)
response = resp.json["responses"][0]
self.assertEqual(response["status"], 201)
record = response["body"]["data"]
self.assertEqual(record["name"], "Trompette de la mort")
def test_400_error_message_is_forwarded(self):
headers = {**self.headers, "If-Match": '"*"'}
request = {
"method": "PUT",
"path": "/mushrooms/{}".format(str(uuid.uuid4())),
"body": {"data": {"name": "Trompette de la mort"}},
"headers": headers,
}
body = {"requests": [request, request]}
resp = self.app.post_json("/batch", body, status=200)
self.assertEqual(resp.json["responses"][1]["status"], 400)
msg = "If-Match in header: The value should be integer between double quotes."
self.assertEqual(resp.json["responses"][1]["body"]["message"], msg)
def test_412_errors_are_forwarded(self):
headers = {**self.headers, "If-None-Match": "*"}
request = {
"method": "PUT",
"path": "/mushrooms/{}".format(str(uuid.uuid4())),
"body": {"data": {"name": "Trompette de la mort"}},
"headers": headers,
}
body = {"requests": [request, request]}
resp = self.app.post_json("/batch", body, status=200)
self.assertEqual(resp.json["responses"][0]["status"], 201)
self.assertEqual(resp.json["responses"][1]["status"], 412)
class BatchSchemaTest(unittest.TestCase):
def setUp(self):
self.schema = BatchPayloadSchema()
def assertInvalid(self, payload):
self.assertRaises(colander.Invalid, self.schema.deserialize, payload)
def test_requests_is_mandatory(self):
self.assertInvalid({})
def test_raise_invalid_on_unknown_attributes(self):
self.assertInvalid({"requests": [], "unknown": 42})
def test_list_of_requests_can_be_empty(self):
self.schema.deserialize({"requests": []})
def test_list_of_requests_must_be_a_list(self):
self.assertInvalid({"requests": {}})
def test_list_of_requests_must_be_dicts(self):
request = 42
self.assertInvalid({"defaults": {"path": "/"}, "requests": [request]})
def test_request_path_must_start_with_slash(self):
request = {"path": "http://localhost"}
self.assertInvalid({"requests": [request]})
def test_request_path_is_mandatory(self):
request = {"method": "HEAD"}
self.assertInvalid({"requests": [request]})
def test_request_method_must_be_known_uppercase_word(self):
request = {"path": "/", "method": "get"}
self.assertInvalid({"requests": [request]})
def test_raise_invalid_on_request_unknown_attributes(self):
request = {"path": "/", "method": "GET", "foo": 42}
self.assertInvalid({"requests": [request]})
#
# headers
#
def test_request_headers_should_be_strings(self):
headers = {"Accept": 3.14}
request = {"path": "/", "headers": headers}
self.assertInvalid({"requests": [request]})
def test_request_headers_cannot_be_recursive(self):
headers = {"Accept": {"sub": "dict"}}
request = {"path": "/", "headers": headers}
self.assertInvalid({"requests": [request]})
def test_request_headers_are_preserved(self):
headers = {"Accept": "audio/*"}
request = {"path": "/", "headers": headers}
deserialized = self.schema.deserialize({"requests": [request]})
self.assertEqual(deserialized["requests"][0]["headers"]["Accept"], "audio/*")
#
# body
#
def test_body_is_an_arbitrary_mapping(self):
payload = {"json": "payload"}
request = {"path": "/", "body": payload}
deserialized = self.schema.deserialize({"requests": [request]})
self.assertEqual(deserialized["requests"][0]["body"], payload)
#
# defaults
#
def test_defaults_must_be_a_mapping_if_specified(self):
request = {"path": "/"}
batch_payload = {"requests": [request], "defaults": 42}
self.assertInvalid(batch_payload)
def test_defaults_must_be_a_request_schema_if_specified(self):
request = {"path": "/"}
defaults = {"body": 3}
batch_payload = {"requests": [request], "defaults": defaults}
self.assertInvalid(batch_payload)
def test_raise_invalid_on_default_unknown_attributes(self):
request = {"path": "/"}
defaults = {"foo": "bar"}
self.assertInvalid({"requests": [request], "defaults": defaults})
def test_defaults_can_be_specified_empty(self):
request = {"path": "/"}
defaults = {}
batch_payload = {"requests": [request], "defaults": defaults}
self.schema.deserialize(batch_payload)
def test_defaults_path_is_applied_to_requests(self):
request = {"method": "GET"}
defaults = {"path": "/"}
batch_payload = {"requests": [request], "defaults": defaults}
result = self.schema.deserialize(batch_payload)
self.assertEqual(result["requests"][0]["path"], "/")
def test_defaults_body_is_applied_to_requests(self):
request = {"path": "/"}
defaults = {"body": {"json": "payload"}}
batch_payload = {"requests": [request], "defaults": defaults}
result = self.schema.deserialize(batch_payload)
self.assertEqual(result["requests"][0]["body"], {"json": "payload"})
def test_defaults_headers_are_applied_to_requests(self):
request = {"path": "/"}
defaults = {"headers": {"Content-Type": "text/html"}}
batch_payload = {"requests": [request], "defaults": defaults}
result = self.schema.deserialize(batch_payload)
self.assertEqual(result["requests"][0]["headers"]["Content-Type"], "text/html")
def test_defaults_values_do_not_overwrite_requests_values(self):
request = {"path": "/", "headers": {"Authorization": "me"}}
defaults = {"headers": {"Authorization": "you", "Accept": "*/*"}}
batch_payload = {"requests": [request], "defaults": defaults}
result = self.schema.deserialize(batch_payload)
self.assertEqual(
result["requests"][0]["headers"], {"Authorization": "me", "Accept": "*/*"}
)
def test_defaults_values_for_path_must_start_with_slash(self):
request = {}
defaults = {"path": "http://localhost"}
batch_payload = {"requests": [request], "defaults": defaults}
self.assertInvalid(batch_payload)
class BatchServiceTest(unittest.TestCase):
def setUp(self):
self.method, self.view, self.options = batch_service.definitions[0]
self.request = DummyRequest()
def post(self, validated):
self.request.validated = {"body": validated}
return self.view(self.request)
def test_returns_empty_list_of_responses_if_requests_empty(self):
result = self.post({"requests": []})
self.assertEqual(result["responses"], [])
def test_returns_one_response_per_request(self):
requests = [{"path": "/"}]
result = self.post({"requests": requests})
self.assertEqual(len(result["responses"]), len(requests))
def test_relies_on_pyramid_invoke_subrequest(self):
self.post({"requests": [{"path": "/"}]})
self.assertTrue(self.request.invoke_subrequest.called)
def test_returns_requests_path_in_responses(self):
result = self.post({"requests": [{"path": "/"}]})
self.assertEqual(result["responses"][0]["path"], "/v0/")
def test_subrequests_have_parent_attribute(self):
self.request.path = "/batch"
self.post({"requests": [{"path": "/"}]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertEqual(subrequest.parent.path, "/batch")
def test_subrequests_are_GET_by_default(self):
self.post({"requests": [{"path": "/"}]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertEqual(subrequest.method, "GET")
def test_original_request_headers_are_passed_to_subrequests(self):
self.request.headers["Authorization"] = "Basic ertyfghjkl"
self.post({"requests": [{"path": "/"}]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertIn("Basic", subrequest.headers["Authorization"])
def test_subrequests_body_are_json_serialized(self):
request = {"path": "/", "body": {"json": "payload"}}
self.post({"requests": [request]})
wanted = {"json": "payload"}
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertEqual(subrequest.body.decode("utf8"), json.dumps(wanted))
def test_subrequests_body_have_json_content_type(self):
self.request.headers["Content-Type"] = "text/xml"
request = {"path": "/", "body": {"json": "payload"}}
self.post({"requests": [request]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertIn("application/json", subrequest.headers["Content-Type"])
def test_subrequests_body_have_utf8_charset(self):
request = {"path": "/", "body": {"json": "😂"}}
self.post({"requests": [request]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertIn("charset=utf-8", subrequest.headers["Content-Type"])
wanted = {"json": "😂"}
self.assertEqual(subrequest.body.decode("utf8"), json.dumps(wanted))
def test_subrequests_paths_are_url_encoded(self):
request = {"path": "/test?param=©"}
self.post({"requests": [request]})
subrequest, = self.request.invoke_subrequest.call_args[0]
self.assertEqual(subrequest.path, "/v0/test")
self.assertEqual(subrequest.GET["param"], "©")
def test_subrequests_responses_paths_are_url_decoded(self):
request = {"path": "/test?param=©"}
resp = self.post({"requests": [request]})
path = resp["responses"][0]["path"]
self.assertEqual(path, "/v0/test")
def test_response_body_is_string_if_remote_response_is_not_json(self):
response = Response(body="Internal Error")
self.request.invoke_subrequest.return_value = response
request = {"path": "/test"}
resp = self.post({"requests": [request]})
body = resp["responses"][0]["body"].decode("utf-8")
self.assertEqual(body, "Internal Error")
def test_number_of_requests_is_not_limited_when_settings_set_to_none(self):
self.request.registry.settings["batch_max_requests"] = None
requests = {}
for i in range(30):
requests.setdefault("requests", []).append({"path": "/"})
self.post(requests)
def test_number_of_requests_is_limited_to_25_by_default(self):
requests = {}
for i in range(26):
requests.setdefault("requests", []).append({"path": "/"})
result = self.post(requests)
self.assertEqual(
self.request.errors[0]["description"], "Number of requests is limited to 25"
)
self.assertIsNone(result) # rest of view not executed
def test_return_400_if_number_of_requests_is_greater_than_settings(self):
self.request.registry.settings["batch_max_requests"] = 22
requests = {}
for i in range(23):
requests.setdefault("requests", []).append({"path": "/"})
result = self.post(requests)
self.assertEqual(
self.request.errors[0]["description"], "Number of requests is limited to 22"
)
self.assertIsNone(result) # rest of view not executed
| 1 | 12,010 | Shouldn't this be `resource`? | Kinto-kinto | py |
@@ -37,13 +37,12 @@ namespace {
/** CPU implementation of evaluation layer forward prop. */
void fp_cpu(lbann_comm& comm,
const AbsDistMat& input,
- DataType& value,
- Al::request& req) {
+ DataType& value) {
const auto& local_input = input.LockedMatrix();
const auto& local_height = local_input.Height();
const auto& local_width = local_input.Width();
const auto& mini_batch_size = input.Width();
- value = DataType(0);
+ value = 0;
#pragma omp parallel for reduction(+:value) collapse(2)
for (El::Int col = 0; col < local_width; ++col) {
for (El::Int row = 0; row < local_height; ++row) { | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/layers/transform/evaluation.hpp"
#include "lbann/utils/exception.hpp"
#ifdef LBANN_HAS_GPU
#include "lbann/utils/cublas.hpp"
#endif // LBANN_HAS_GPU
namespace lbann {
namespace {
/** CPU implementation of evaluation layer forward prop. */
void fp_cpu(lbann_comm& comm,
const AbsDistMat& input,
DataType& value,
Al::request& req) {
const auto& local_input = input.LockedMatrix();
const auto& local_height = local_input.Height();
const auto& local_width = local_input.Width();
const auto& mini_batch_size = input.Width();
value = DataType(0);
#pragma omp parallel for reduction(+:value) collapse(2)
for (El::Int col = 0; col < local_width; ++col) {
for (El::Int row = 0; row < local_height; ++row) {
value += local_input(row, col);
}
}
value = value / mini_batch_size;
comm.nb_allreduce(&value, 1, input.DistComm(), req);
}
#ifdef LBANN_HAS_GPU
/** GPU implementation of evaluation layer forward prop. */
void fp_gpu(lbann_comm& comm,
const AbsDistMat& input,
DataType& value,
Al::request& req) {
// Local matrix
const auto& local_input = input.LockedMatrix();
const auto& local_height = local_input.Height();
const auto& local_width = local_input.Width();
const auto& mini_batch_size = input.Width();
// GPU objects
GPUMat sum_d, ones_d;
#ifdef HYDROGEN_HAVE_CUB
sum_d.SetMemoryMode(1); // Use CUB GPU memory pool if possible
ones_d.SetMemoryMode(1); // Use CUB GPU memory pool if possible
#endif // HYDROGEN_HAVE_CUB
auto&& handle = El::GPUManager::cuBLASHandle();
CHECK_CUBLAS(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
// Compute sum of local input matrix entries
if (local_height < 1 || local_width < 1) {
El::Zeros(sum_d, 1, 1);
} else if (local_height == local_input.LDim() || local_width == 1) {
sum_d.Resize(1, 1);
ones_d.Resize(local_height * local_width, 1);
El::Fill(ones_d, DataType(1));
cublas::dot(handle,
local_height * local_width,
local_input.LockedBuffer(), 1,
ones_d.LockedBuffer(), 1,
sum_d.Buffer());
} else if (local_height == 1) {
sum_d.Resize(1, 1);
ones_d.Resize(local_width, 1);
El::Fill(ones_d, DataType(1));
cublas::dot(handle,
local_width,
local_input.LockedBuffer(), local_input.LDim(),
ones_d.LockedBuffer(), 1,
sum_d.Buffer());
} else {
sum_d.Resize(local_width + 1, 1);
ones_d.Resize(std::max(local_height, local_width), 1);
El::Fill(ones_d, DataType(1));
for (El::Int col = 0; col < local_width; ++col) {
cublas::dot(handle,
local_height,
local_input.LockedBuffer(0, col), 1,
ones_d.LockedBuffer(), 1,
sum_d.Buffer(col+1, 0));
}
cublas::dot(handle,
local_width,
sum_d.LockedBuffer(1, 0), 1,
ones_d.LockedBuffer(), 1,
sum_d.Buffer(0, 0));
}
CHECK_CUBLAS(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
// Compute average value across mini-batch
CHECK_CUDA(cudaMemcpy(&value, sum_d.LockedBuffer(), sizeof(DataType),
cudaMemcpyDeviceToHost));
value = value / mini_batch_size;
comm.nb_allreduce(&value, 1, input.DistComm(), req);
}
#endif // LBANN_HAS_GPU
} // namespace
EvalType abstract_evaluation_layer::get_value(bool scaled) {
get_comm()->wait(m_allreduce_req);
if (scaled) { return m_scale * m_value; }
else { return m_value; }
}
abstract_evaluation_layer::abstract_evaluation_layer(lbann_comm *comm)
: transform_layer(comm), m_scale(0), m_value(0) {
// Evaluation layer has no children
m_expected_num_child_layers = 0;
}
void abstract_evaluation_layer::fp_compute() {
switch (get_device_allocation()) {
case El::Device::CPU:
fp_cpu(*get_comm(), get_prev_activations(), m_value, m_allreduce_req);
break;
#ifdef LBANN_HAS_GPU
case El::Device::GPU:
fp_gpu(*get_comm(), get_prev_activations(), m_value, m_allreduce_req);
break;
#endif // LBANN_HAS_GPU
default: LBANN_ERROR("invalid device");
}
}
void abstract_evaluation_layer::bp_compute() {
El::Fill(get_error_signals(), DataType(m_scale));
}
abstract_evaluation_layer*
abstract_evaluation_layer::construct(lbann_comm *comm,
data_layout layout,
El::Device device) {
#define EVAL_LAYER_CONSTRUCT(T_layout, T_device) \
do { \
if (layout == T_layout && device == T_device) { \
return new evaluation_layer<T_layout, T_device>(comm); \
} \
} while (false)
EVAL_LAYER_CONSTRUCT(data_layout::DATA_PARALLEL, El::Device::CPU);
EVAL_LAYER_CONSTRUCT(data_layout::MODEL_PARALLEL, El::Device::CPU);
#ifdef LBANN_HAS_GPU
EVAL_LAYER_CONSTRUCT(data_layout::DATA_PARALLEL, El::Device::GPU);
EVAL_LAYER_CONSTRUCT(data_layout::MODEL_PARALLEL, El::Device::GPU);
#endif // LBANN_HAS_GPU
#undef EVAL_LAYER_CONSTRUCT
// Could not construct evaluation layer
std::stringstream err;
err << "attempted to construct evaluation layer "
<< "with invalid parameters "
<< "(data layout type " << static_cast<int>(layout) << ", "
<< "device type " << static_cast<int>(device) << ")";
LBANN_ERROR(err.str());
return nullptr;
}
} // namespace lbann
| 1 | 13,205 | Why does this become blocking for the CPU path? Shouldn't it remain independent of the GPU path? | LLNL-lbann | cpp |
@@ -24,7 +24,7 @@ module Travis
sh.export 'TRAVIS_RUBY_VERSION', config[:rvm], echo: false if rvm?
end
- def setup
+ def configure
super
setup_rvm if rvm?
end | 1 | module Travis
module Build
class Script
module RVM
include Chruby
MSGS = {
setup_ruby_head: 'Setting up latest %s'
}
CONFIG = %w(
rvm_remote_server_url3=https://s3.amazonaws.com/travis-rubies/binaries
rvm_remote_server_type3=rubies
rvm_remote_server_verify_downloads3=1
)
RVM_VERSION_ALIASES = {
'2.3' => '2.3.4',
'2.4' => '2.4.1'
}
def export
super
sh.export 'TRAVIS_RUBY_VERSION', config[:rvm], echo: false if rvm?
end
def setup
super
setup_rvm if rvm?
end
def announce
super
sh.cmd 'ruby --version'
sh.cmd 'rvm --version' if rvm?
end
def cache_slug
super.tap { |slug| slug << "--rvm-" << ruby_version.to_s if rvm? }
end
private
def version
config[:rvm].to_s
end
def rvm?
!!config[:rvm]
end
def ruby_version
vers = config[:rvm].to_s.gsub(/-(1[89]|2[01])mode$/, '-d\1')
force_187_p371 vers
end
def setup_rvm
write_default_gems
if without_teeny?(version)
setup_rvm_aliases
end
sh.cmd('type rvm &>/dev/null || source ~/.rvm/scripts/rvm', echo: false, assert: false, timing: false)
sh.file '$rvm_path/user/db', CONFIG.join("\n")
send rvm_strategy
end
def rvm_strategy
return :use_ruby_head if ruby_version.include?('ruby-head')
return :use_default_ruby if ruby_version == 'default'
:use_ruby_version
end
def use_ruby_head
sh.fold('rvm') do
sh.echo MSGS[:setup_ruby_head] % ruby_version, ansi: :yellow
sh.cmd "rvm get stable", assert: false if ruby_version == 'jruby-head'
sh.export 'ruby_alias', "`rvm alias show #{ruby_version} 2>/dev/null`"
sh.cmd "rvm alias delete #{ruby_version}"
sh.cmd "rvm remove ${ruby_alias:-#{ruby_version}} --gems"
sh.cmd "rvm remove #{ruby_version} --gems --fuzzy"
sh.cmd "rvm install #{ruby_version} --binary"
sh.cmd "rvm use #{ruby_version}"
end
end
def use_default_ruby
sh.if '-f .ruby-version' do
use_ruby_version_file
end
sh.else do
use_rvm_default_ruby
end
end
def use_ruby_version_file
sh.fold('rvm') do
sh.cmd 'rvm use $(< .ruby-version) --install --binary --fuzzy'
end
end
def use_rvm_default_ruby
sh.fold('rvm') do
sh.cmd "rvm use default", timing: true
end
end
def use_ruby_version
skip_deps_install if rbx?
sh.fold('rvm') do
if ruby_version.start_with? 'ree'
sh.if "! $(rvm list | grep ree)" do
sh.echo "Installing REE from source. This may take a few minutes.", ansi: :yellow
sh.cmd "sed -i 's|^\\(ree_1.8.7_url\\)=.*$|\\1=https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/rubyenterpriseedition|' $HOME/.rvm/config/db"
sh.cmd "rvm use #{ruby_version} --install --fuzzy"
end
sh.else do
sh.cmd "rvm use #{ruby_version} --install --binary --fuzzy"
end
else
sh.cmd "rvm use #{ruby_version} --install --binary --fuzzy"
end
end
end
def rbx?
/^(rbx\S*)/.match(version)
end
def skip_deps_install
sh.cmd "rvm autolibs disable", echo: false, timing: false
end
def write_default_gems
sh.mkdir '$rvm_path/gemsets', recursive: true, echo: false
sh.cmd 'echo -e "gem-wrappers\nrubygems-bundler\nbundler\nrake\nrvm\n" > $rvm_path/gemsets/global.gems', echo: false, timing: false
sh.cmd 'echo -e "jruby-openssl\njruby-launcher\ngem-wrappers\nrubygems-bundler\nbundler\nrake\nrvm\n" > $rvm_path/gemsets/jruby/global.gems', echo: false, timing: false
end
def force_187_p371(version)
version.gsub(/^1\.8\.7.*$/, '1.8.7-p371')
end
def setup_rvm_aliases
RVM_VERSION_ALIASES.select {|k,v| k == version}.each do |alias_version, real_version|
grep_str = alias_version.gsub('.', '\\\\\\.')
sh.if "-z $(rvm alias list | grep ^#{grep_str})" do
sh.cmd "rvm alias create #{alias_version} ruby-#{real_version}", echo: true, assert: true
end
end
end
def without_teeny?(version)
version =~ /\A(\d+)(\.\d+)\z/
end
end
end
end
end
| 1 | 15,414 | Do we need to change occurences where `setup` was called before? | travis-ci-travis-build | rb |
@@ -61,6 +61,16 @@ func (a *API) resolveReferences() {
o.ErrorRefs[i].Shape.IsError = true
}
}
+
+ // TODO put this somewhere better
+ for _, s := range a.Shapes {
+ switch s.Type {
+ case "list":
+ s.MemberRef.Shape.UsedInList = true
+ case "map":
+ s.ValueRef.Shape.UsedInMap = true
+ }
+ }
}
// A referenceResolver provides a way to resolve shape references to | 1 | // +build codegen
package api
import (
"fmt"
"regexp"
"strings"
)
// updateTopLevelShapeReferences moves resultWrapper, locationName, and
// xmlNamespace traits from toplevel shape references to the toplevel
// shapes for easier code generation
func (a *API) updateTopLevelShapeReferences() {
for _, o := range a.Operations {
// these are for REST-XML services
if o.InputRef.LocationName != "" {
o.InputRef.Shape.LocationName = o.InputRef.LocationName
}
if o.InputRef.Location != "" {
o.InputRef.Shape.Location = o.InputRef.Location
}
if o.InputRef.Payload != "" {
o.InputRef.Shape.Payload = o.InputRef.Payload
}
if o.InputRef.XMLNamespace.Prefix != "" {
o.InputRef.Shape.XMLNamespace.Prefix = o.InputRef.XMLNamespace.Prefix
}
if o.InputRef.XMLNamespace.URI != "" {
o.InputRef.Shape.XMLNamespace.URI = o.InputRef.XMLNamespace.URI
}
}
}
// writeShapeNames sets each shape's API and shape name values. Binding the
// shape to its parent API.
func (a *API) writeShapeNames() {
for n, s := range a.Shapes {
s.API = a
s.ShapeName = n
}
}
func (a *API) resolveReferences() {
resolver := referenceResolver{API: a, visited: map[*ShapeRef]bool{}}
for _, s := range a.Shapes {
resolver.resolveShape(s)
}
for _, o := range a.Operations {
o.API = a // resolve parent reference
resolver.resolveReference(&o.InputRef)
resolver.resolveReference(&o.OutputRef)
// Resolve references for errors also
for i := range o.ErrorRefs {
resolver.resolveReference(&o.ErrorRefs[i])
o.ErrorRefs[i].Shape.IsError = true
}
}
}
// A referenceResolver provides a way to resolve shape references to
// shape definitions.
type referenceResolver struct {
*API
visited map[*ShapeRef]bool
}
var jsonvalueShape = &Shape{
ShapeName: "JSONValue",
Type: "jsonvalue",
ValueRef: ShapeRef{
JSONValue: true,
},
}
// resolveReference updates a shape reference to reference the API and
// its shape definition. All other nested references are also resolved.
func (r *referenceResolver) resolveReference(ref *ShapeRef) {
if ref.ShapeName == "" {
return
}
if shape, ok := r.API.Shapes[ref.ShapeName]; ok {
if ref.JSONValue {
ref.ShapeName = "JSONValue"
r.API.Shapes[ref.ShapeName] = jsonvalueShape
}
ref.API = r.API // resolve reference back to API
ref.Shape = shape // resolve shape reference
if r.visited[ref] {
return
}
r.visited[ref] = true
shape.refs = append(shape.refs, ref) // register the ref
// resolve shape's references, if it has any
r.resolveShape(shape)
}
}
// resolveShape resolves a shape's Member Key Value, and nested member
// shape references.
func (r *referenceResolver) resolveShape(shape *Shape) {
r.resolveReference(&shape.MemberRef)
r.resolveReference(&shape.KeyRef)
r.resolveReference(&shape.ValueRef)
for _, m := range shape.MemberRefs {
r.resolveReference(m)
}
}
// renameToplevelShapes renames all top level shapes of an API to their
// exportable variant. The shapes are also updated to include notations
// if they are Input or Outputs.
func (a *API) renameToplevelShapes() {
for _, v := range a.OperationList() {
if v.HasInput() {
name := v.ExportedName + "Input"
switch {
case a.Shapes[name] == nil:
if service, ok := shamelist[a.name]; ok {
if check, ok := service[v.Name]; ok && check.input {
break
}
}
v.InputRef.Shape.Rename(name)
}
}
if v.HasOutput() {
name := v.ExportedName + "Output"
switch {
case a.Shapes[name] == nil:
if service, ok := shamelist[a.name]; ok {
if check, ok := service[v.Name]; ok && check.output {
break
}
}
v.OutputRef.Shape.Rename(name)
}
}
v.InputRef.Payload = a.ExportableName(v.InputRef.Payload)
v.OutputRef.Payload = a.ExportableName(v.OutputRef.Payload)
}
}
// fixStutterNames fixes all name struttering based on Go naming conventions.
// "Stuttering" is when the prefix of a structure or function matches the
// package name (case insensitive).
func (a *API) fixStutterNames() {
str, end := a.StructName(), ""
if len(str) > 1 {
l := len(str) - 1
str, end = str[0:l], str[l:]
}
re := regexp.MustCompile(fmt.Sprintf(`\A(?i:%s)%s`, str, end))
for name, op := range a.Operations {
newName := re.ReplaceAllString(name, "")
if newName != name {
delete(a.Operations, name)
a.Operations[newName] = op
}
op.ExportedName = newName
}
for k, s := range a.Shapes {
newName := re.ReplaceAllString(k, "")
if newName != s.ShapeName {
s.Rename(newName)
}
}
}
// renameExportable renames all operation names to be exportable names.
// All nested Shape names are also updated to the exportable variant.
func (a *API) renameExportable() {
for name, op := range a.Operations {
newName := a.ExportableName(name)
if newName != name {
delete(a.Operations, name)
a.Operations[newName] = op
}
op.ExportedName = newName
}
for k, s := range a.Shapes {
// FIXME SNS has lower and uppercased shape names with the same name,
// except the lowercased variant is used exclusively for string and
// other primitive types. Renaming both would cause a collision.
// We work around this by only renaming the structure shapes.
if s.Type == "string" {
continue
}
for mName, member := range s.MemberRefs {
newName := a.ExportableName(mName)
if newName != mName {
delete(s.MemberRefs, mName)
s.MemberRefs[newName] = member
// also apply locationName trait so we keep the old one
// but only if there's no locationName trait on ref or shape
if member.LocationName == "" && member.Shape.LocationName == "" {
member.LocationName = mName
}
}
if newName == "_" {
panic("Shape " + s.ShapeName + " uses reserved member name '_'")
}
}
newName := a.ExportableName(k)
if newName != s.ShapeName {
s.Rename(newName)
}
s.Payload = a.ExportableName(s.Payload)
// fix required trait names
for i, n := range s.Required {
s.Required[i] = a.ExportableName(n)
}
}
for _, s := range a.Shapes {
// fix enum names
if s.IsEnum() {
s.EnumConsts = make([]string, len(s.Enum))
for i := range s.Enum {
shape := s.ShapeName
shape = strings.ToUpper(shape[0:1]) + shape[1:]
s.EnumConsts[i] = shape + s.EnumName(i)
}
}
}
}
// createInputOutputShapes creates toplevel input/output shapes if they
// have not been defined in the API. This normalizes all APIs to always
// have an input and output structure in the signature.
func (a *API) createInputOutputShapes() {
for _, op := range a.Operations {
if !op.HasInput() {
setAsPlacholderShape(&op.InputRef, op.ExportedName+"Input", a)
}
if !op.HasOutput() {
setAsPlacholderShape(&op.OutputRef, op.ExportedName+"Output", a)
}
}
}
func setAsPlacholderShape(tgtShapeRef *ShapeRef, name string, a *API) {
shape := a.makeIOShape(name)
shape.Placeholder = true
*tgtShapeRef = ShapeRef{API: a, ShapeName: shape.ShapeName, Shape: shape}
shape.refs = append(shape.refs, tgtShapeRef)
}
// makeIOShape returns a pointer to a new Shape initialized by the name provided.
func (a *API) makeIOShape(name string) *Shape {
shape := &Shape{
API: a, ShapeName: name, Type: "structure",
MemberRefs: map[string]*ShapeRef{},
}
a.Shapes[name] = shape
return shape
}
// removeUnusedShapes removes shapes from the API which are not referenced by any
// other shape in the API.
func (a *API) removeUnusedShapes() {
for n, s := range a.Shapes {
if len(s.refs) == 0 {
delete(a.Shapes, n)
}
}
}
// Represents the service package name to EndpointsID mapping
var custEndpointsKey = map[string]string{
"applicationautoscaling": "application-autoscaling",
}
// Sents the EndpointsID field of Metadata with the value of the
// EndpointPrefix if EndpointsID is not set. Also adds
// customizations for services if EndpointPrefix is not a valid key.
func (a *API) setMetadataEndpointsKey() {
if len(a.Metadata.EndpointsID) != 0 {
return
}
if v, ok := custEndpointsKey[a.PackageName()]; ok {
a.Metadata.EndpointsID = v
} else {
a.Metadata.EndpointsID = a.Metadata.EndpointPrefix
}
}
| 1 | 8,858 | Is this TODO still valid? Or are we going to put this somewhere else later? | aws-aws-sdk-go | go |
@@ -15,12 +15,17 @@ def generate_data(test_user_id, user_name, from_ts, num_records):
test_data = []
artist_msid = str(uuid.uuid4())
- for i in range(num_records):
+ if (from_ts == None): #check for playing now listens
+ timestamp = None
+ else:
from_ts += 1 # Add one second
+ timestamp = datetime.utcfromtimestamp(from_ts)
+
+ for i in range(num_records):
item = Listen(
user_name=user_name,
user_id=test_user_id,
- timestamp=datetime.utcfromtimestamp(from_ts),
+ timestamp= timestamp,
artist_msid=artist_msid,
recording_msid=str(uuid.uuid4()),
data={ | 1 | # coding=utf-8
import json
import os
import uuid
from datetime import datetime
from listenbrainz.listen import Listen
TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'testdata')
def generate_data(test_user_id, user_name, from_ts, num_records):
test_data = []
artist_msid = str(uuid.uuid4())
for i in range(num_records):
from_ts += 1 # Add one second
item = Listen(
user_name=user_name,
user_id=test_user_id,
timestamp=datetime.utcfromtimestamp(from_ts),
artist_msid=artist_msid,
recording_msid=str(uuid.uuid4()),
data={
'artist_name': 'Frank Ocean',
'track_name': 'Crack Rock',
'additional_info': {},
},
)
test_data.append(item)
return test_data
def to_epoch(date):
return int((date - datetime.utcfromtimestamp(0)).total_seconds())
def create_test_data_for_influxlistenstore(user_name):
"""Create listens for influxlistenstore tests.
From a json file 'influx_listenstore_test_listens.json' in testdata
it creates Listen objects with a specified user_name for tests.
Args:
user_name (str): MusicBrainz username of a user.
Returns:
A list of Listen objects.
"""
test_data_file = os.path.join(TEST_DATA_PATH, 'influx_listenstore_test_listens.json')
with open(test_data_file, 'r') as f:
listens = json.load(f)
test_data = []
for listen in listens['payload']:
listen['user_name'] = user_name
test_data.append(Listen().from_json(listen))
return test_data
| 1 | 15,137 | We generally don't do parantheses in if conditions in Python. :) this could be better written as `if from_ts is None` | metabrainz-listenbrainz-server | py |
@@ -460,9 +460,13 @@ void tm_process_req_requestregioninfo(CTmTxMessage * pp_msg)
TM_Txid_legacy lv_transid;
} u;
- char tname[2000], ername[50], rname[100], offline[20], regid[200], hostname[200], port[100];
+ char tname[2000];
+ tname[299] = '\0';
+/*
+ char ername[50], rname[100], offline[20], regid[200], hostname[200], port[100];
tname[299] = '\0', ername[49] = '\0', rname[99] = '\0', offline[19] = '\0';
regid[199]= '\0', hostname[199]='\0', port[99]='\0';
+*/
TMTrace(2, ("tm_process_req_requestregioninfo ENTRY.\n"));
HashMapArray* map = HbaseTM_process_request_regions_info(); | 1 | // @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
// General Seaquest includes
#include "SCMVersHelp.h"
// seabed includes
#include "seabed/ms.h"
#include "seabed/pctl.h"
#include "seabed/pevents.h"
#include "seabed/trace.h"
#include "seabed/thread.h"
#include "common/sq_common.h"
// tm includes
#include "rm.h"
#include "tmaudit.h"
// #include "tmmap.h"
#include "tmregistry.h"
#include "tmlogging.h"
#include "tmrecov.h"
#include "tmshutdown.h"
#include "tmpool.h"
#include "tmstats.h"
#include "tmglobals.h"
#include "tmtimer.h"
#include "tmthreadeg.h"
#include "tminfo.h"
#include "tmrecov.h"
#include "tmtxbranches.h"
#include "hbasetm.h"
extern void tm_xarm_initialize();
extern void tm_process_msg_from_xarm(CTmTxMessage * pp_msg);
extern int HbaseTM_initialize (bool pp_tracing, bool pv_tm_stats, CTmTimer *pp_tmTimer, short pv_nid);
extern int HbaseTM_initiate_stall(int where);
extern HashMapArray* HbaseTM_process_request_regions_info();
// Version
DEFINE_EXTERN_COMP_DOVERS(tm)
// global data
CTmTxBranches gv_RMs;
TM_MAP gv_sync_map;
int32 gv_system_tx_count = 0;
TM_Info gv_tm_info;
SB_Int64_Type gv_wait_interval = TM_DEFAULT_WAIT_INTERVAL;
CTmThreadExample *gp_tmExampleThread;
// ---------------------------------------------------------------
// misc helper routines
// ---------------------------------------------------------------
void tm_fill_perf_stats_buffer (Tm_Perf_Stats_Rsp_Type *pp_buffer)
{
int64 lv_tx_count = gv_tm_info.tx_count();
int64 lv_abort_count = gv_tm_info.abort_count();
int64 lv_commit_count = gv_tm_info.commit_count();
int64 lv_current_tx_count = gv_tm_info.current_tx_count();
int32 lv_tm_abort_count = gv_tm_info.tm_initiated_aborts();
int32 lv_hung_tx_count = gv_tm_info.tx_hung_count();
TMTrace (2, ("tm_fill_perf_stats_buffer : tx count : " PFLL ", abort count " PFLL ", commit count " PFLL ", current tx count " PFLL ".\n", lv_tx_count, lv_abort_count, lv_commit_count, lv_current_tx_count));
pp_buffer->iv_error = 0;
pp_buffer->iv_tx_count = lv_tx_count;
pp_buffer->iv_abort_count = lv_abort_count;
pp_buffer->iv_commit_count = lv_commit_count;
pp_buffer->iv_tm_initiated_aborts = lv_tm_abort_count;
pp_buffer->iv_hung_tx_count = lv_hung_tx_count;
pp_buffer->iv_outstanding_tx_count = lv_current_tx_count;
pp_buffer->iv_oldest_transid_1 = 0;
pp_buffer->iv_oldest_transid_2 = 0;
pp_buffer->iv_oldest_transid_3 = 0;
pp_buffer->iv_oldest_transid_4 = 0;
}
void tm_fill_sys_status_buffer (Tm_Sys_Status_Rsp_Type *pp_buffer)
{
int32 lv_up = 0;
int32 lv_down = 0;
if(gv_tm_info.state() == TM_STATE_UP) {
lv_up = 1;
}
else if(gv_tm_info.state() == TM_STATE_DOWN ||
gv_tm_info.state() == TM_STATE_WAITING_RM_OPEN) {
lv_down = 1;
}
int32 lv_recovering = 0;
if(gv_tm_info.sys_recov_state() != TM_SYS_RECOV_STATE_END) {
lv_recovering = 1;
}
int32 lv_totaltms = 1;
int32 lv_leadtm = 0;
if(gv_tm_info.lead_tm()) {
lv_leadtm = 1;
}
int32 lv_activetxns = gv_tm_info.num_active_txs();
// If we're still in recovery we need to add any transactions
// still queued to recover.
if (gv_tm_info.ClusterRecov())
lv_activetxns += gv_tm_info.ClusterRecov()->txnStateList()->size();
TMTrace (2, ("tm_fill_sys_status_buffer : up %d, down %d, recovering %d, activetxns %d.\n", lv_up, lv_down, lv_recovering, lv_activetxns));
pp_buffer->iv_status_system.iv_up = lv_up;
pp_buffer->iv_status_system.iv_down = lv_down;
pp_buffer->iv_status_system.iv_recovering = lv_recovering;
pp_buffer->iv_status_system.iv_totaltms = lv_totaltms;
pp_buffer->iv_status_system.iv_activetxns = lv_activetxns;
pp_buffer->iv_status_system.iv_leadtm = lv_leadtm;
}
// ---------------------------------------------------------
// tm_initialize_rsp_hdr
// Purpose : Initialize the header field for a message response.
// Note this should only be used for broadcast syncs now.
// All TM Library responses are through the CTmTxMessage class.
// ---------------------------------------------------------
int tm_initialize_rsp_hdr(short rsp_type, Tm_Rsp_Msg_Type *pp_rsp)
{
pp_rsp->iv_msg_hdr.dialect_type = DIALECT_TM_SQ;
pp_rsp->iv_msg_hdr.rr_type.reply_type = (short) (rsp_type + 1);
pp_rsp->iv_msg_hdr.version.reply_version = TM_SQ_MSG_VERSION_CURRENT;
pp_rsp->iv_msg_hdr.miv_err.error = FEOK;
return FEOK;
}
// ----------------------------------------------------------------
// tm_start_auditThread
// Purpose : Start the audit thread.
// ----------------------------------------------------------------
void tm_start_auditThread()
{
char lv_name[20];
TMTrace(2, ("tm_start_auditThread ENTRY\n"));
   // Instantiate audit object
sprintf(lv_name, "auditTh");
CTmAuditObj *lp_auditobj = new CTmAuditObj(auditThread_main, (const char *) &lv_name);
if (lp_auditobj)
{
gv_tm_info.tmAuditObj(lp_auditobj);
gv_tm_info.initialize_adp();
}
else
{
tm_log_event(DTM_TMTIMER_FAILED, SQ_LOG_CRIT, "DTM_TMTIMER_FAILED");
TMTrace(1, ("tm_start_auditThread - Failed to instantiate audit object.\n"));
abort();
}
TMTrace(2, ("tm_start_auditThread EXIT. Timer thread %s(%p) started.\n",
lv_name, (void *) lp_auditobj));
} //tm_start_auditThread
// ----------------------------------------------------------------
// tm_start_timerThread
// Purpose : Start the timer thread. This is used whenever an
// internal timed event occurs.
// ----------------------------------------------------------------
void tm_start_timerThread()
{
char lv_name[20];
TMTrace(2, ("tm_start_timerThread ENTRY\n"));
// Instantiate timer object
sprintf(lv_name, "timerTh");
CTmTimer *lp_timer = new CTmTimer(timerThread_main, -1, (const char *) &lv_name,
gv_tm_info.timerDefaultWaitTime());
if (lp_timer)
{
gv_tm_info.tmTimer(lp_timer);
gv_startTime = lp_timer->startTime();
lp_timer->addControlpointEvent(gv_tm_info.cp_interval());
if (gv_tm_info.lead_tm())
{
TMTrace(2, ("tm_start_timerThread lead DTM, adding timer events\n"));
lp_timer->addStatsEvent(gv_tm_info.stats_interval());
}
lp_timer->addRMRetryEvent(gv_tm_info.RMRetry_interval());
}
else
{
tm_log_event(DTM_TMTIMER_FAILED, SQ_LOG_CRIT, "DTM_TMTIMER_FAILED");
TMTrace(1, ("tm_start_timerThread - Failed to instantiate timer object.\n"));
abort();
}
TMTrace(2, ("tm_start_timerThread EXIT. Timer thread %s(%p) started.\n",
lv_name, (void *) lp_timer));
} //tm_start_timerThread
// ----------------------------------------------------------------
// tm_start_exampleThread
// Purpose : Start the example thread.
// ----------------------------------------------------------------
void tm_start_exampleThread()
{
char lv_name[20];
TMTrace(2, ("tm_start_exampleThread ENTRY\n"));
// Instantiate thread object
sprintf(lv_name, "exampleTh");
gp_tmExampleThread = new CTmThreadExample(exampleThread_main, -2, (const char *) &lv_name);
if (!gp_tmExampleThread)
{
tm_log_event(DTM_TMTIMER_FAILED, SQ_LOG_CRIT, "DTM_TMTIMER_FAILED");
TMTrace(1, ("tm_start_exampleThread - Failed to instantiate example thread object\n"));
abort();
}
TMTrace(2, ("tm_start_exampleThread EXIT. Example thread %s(%p) started.\n",
lv_name, (void *) gp_tmExampleThread));
} //tm_start_exampleThread
// ---------------------------------------------------------
// tm_send_reply
// Purpose : send a reply
// Note this should only be used for cp and shutdown
// replies.
// ---------------------------------------------------------
void tm_send_reply(int32 pv_msgid, Tm_Rsp_Msg_Type *pp_rsp)
{
int lv_len = sizeof(Tm_Rsp_Msg_Type);
TMTrace( 2, ("tm_send_reply : ENTRY. msgid(%d), reply code(%d), error(%d).\n",
pv_msgid, pp_rsp->iv_msg_hdr.rr_type.reply_type,
pp_rsp->iv_msg_hdr.miv_err.error));
BMSG_REPLY_(pv_msgid, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) pp_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
}
// ---------------------------------------------------------
// tm_up_check
// Purpose : Check that DTM is up and, if not, reply
// to the client with an error.
// We need to allow active transactions to continue to
// completion when shutting down and when transactions are
// disabled.
// Note that this function will delete pp_msg if there is
// an error, so it must not be used after the call if false
// is returned.
// pv_block default=true, block client in TM_LIB by returning
// FESERVICEDISABLED.
// false - don't block client on reply.
// --------------------------------------------------------
const bool TX_UNBLOCKED=false,
TX_BLOCKED=true;
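// While quiescing, a blocked caller is answered with FESERVICEDISABLED so the
// TM library holds (blocks) the request, whereas an unblocked caller simply
// gets FETMFNOTRUNNING back.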
bool tm_up_check(CTmTxMessage * pp_msg, bool pv_block=true)
{
bool lv_up = false;
short lv_replyCode = FEOK;
switch (gv_tm_info.state())
{
case TM_STATE_UP:
case TM_STATE_SHUTTING_DOWN:
case TM_STATE_TX_DISABLED:
case TM_STATE_TX_DISABLED_SHUTDOWN_PHASE1:
case TM_SYS_RECOV_STATE_END:
case TM_STATE_DRAIN:
lv_replyCode = FEOK;
lv_up = true;
break;
case TM_STATE_QUIESCE:
lv_replyCode = (pv_block)?FESERVICEDISABLED:FETMFNOTRUNNING;
lv_up = false;
break;
default:
lv_replyCode = FETMFNOTRUNNING;
lv_up = false;
}
if (!lv_up)
{
TMTrace(1, ("tm_up_check EXIT replying %d, up=%d.\n", lv_replyCode, lv_up));
pp_msg->reply(lv_replyCode);
delete pp_msg;
}
return lv_up;
}
// ---------------------------------------------------------
// tm_notx_check
// Purpose : Check that the transaction exists and, if not, reply
// to the client with an error.
// Note that this function will delete pp_msg if there is
// an error, so it must not be used after the call if false
// is returned
// --------------------------------------------------------
bool tm_notx_check(TM_TX_Info *pp_tx, CTmTxMessage *pp_msg)
{
if (pp_tx == NULL)
{
TMTrace(1, ("tm_notx_check - unable to complete, returning error FENOTRANSID\n"));
pp_msg->reply(FENOTRANSID);
delete pp_msg;
return false;
}
return true;
}
// ------------------------------------------------------------
// Process request methods
// ------------------------------------------------------------
// --------------------------------------------------------------
// process_req_abort
// Purpose : process message of type TM_MSG_TYPE_ABORTTRANSACTION
// ---------------------------------------------------------------
void tm_process_req_abort(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_abort_trans.iv_transid;
TMTrace(2, ("tm_process_req_abort, ID %d ENTRY\n", lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg))
return;
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
lp_tx->stats()->txnAbort()->start();
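   // Single-threaded mode drives the abort inline and releases the transaction
   // object here; otherwise the request is queued to the transaction's thread.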
if (!gv_tm_info.multithreaded()) {
lp_tx->req_abort(pp_msg);
gv_tm_info.cleanup(lp_tx);
delete pp_msg;
}
else
lp_tx->queueToTransaction(lp_transid, pp_msg);
TMTrace(2, ("tm_process_req_abort EXIT\n"));
}
// ----------------------------------------------------------------
// tm_process_req_registerregion
// Purpose : process message of type TM_MSG_TYPE_REGISTERREGION
// ----------------------------------------------------------------
void tm_process_req_registerregion(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_register_region.iv_transid;
TM_Transseq_Type * lp_startid = (TM_Transseq_Type *)
&pp_msg->request()->u.iv_register_region.iv_startid;
TMTrace(2, ("tm_process_req_registerregion ENTRY for Txn ID (%d,%d), startid %ld, msgid %d\n",
lp_transid->iv_node, lp_transid->iv_seq_num, (long) lp_startid, pp_msg->msgid()));
TMTrace(3, ("tm_process_req_registerregion for Txn ID (%d,%d), startid %ld, with region %s\n",
lp_transid->iv_node, lp_transid->iv_seq_num, (long) lp_startid,
pp_msg->request()->u.iv_register_region.ia_regioninfo2));
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
// lp_tx->req_registerRegion(pp_msg);
if (!gv_tm_info.multithreaded()) {
lp_tx->req_registerRegion(pp_msg);
delete pp_msg;
}
else{
lp_tx->queueToTransaction(lp_transid, pp_msg);
// Protect the reply as we may be trying to reply at the same time in the main thread.
lp_tx->lock();
if (lp_tx->transactionBusy() && pp_msg->replyPending())
pp_msg->reply(FEOK);
lp_tx->unlock();
}
TMTrace(2, ("tm_process_req_registerregion EXIT\n"));
} // tm_process_req_registerregion
// -----------------------------------------------------------------
// tm_process_req_ddlrequest
// Purpose : process message of type TM_MSG_TYPE_DDLREQUEST
// -----------------------------------------------------------------
void tm_process_req_ddlrequest(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_ddl_request.iv_transid;
TMTrace(2, ("tm_process_req_ddlrequest ENTRY for Txn ID (%d, %d) ", lp_transid->iv_node, lp_transid->iv_seq_num));
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!gv_tm_info.multithreaded()) {
lp_tx->req_ddloperation(pp_msg);
pp_msg->reply(FEOK);
delete pp_msg;
}
else {
lp_tx->queueToTransaction(lp_transid, pp_msg);
}
TMTrace(2, ("tm_process_req_ddlrequest EXIT for Txn ID"));
}
//-----------------------------------------------------------------
// tm_process_req_requestregioninfo
// Purpose: process message of type TM_MSG_TYPE_REQUESTREGIONINFO
//-----------------------------------------------------------------
void tm_process_req_requestregioninfo(CTmTxMessage * pp_msg)
{
int64 lv_size = 0;
void **lp_tx_list = gv_tm_info.get_all_txs (&lv_size);
union
{
int64 lv_transid_int64;
TM_Txid_legacy lv_transid;
} u;
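   // The union lets the code fill in the legacy transid fields piecewise and
   // read the same bytes back as one packed int64 for the response buffer.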
char tname[2000], ername[50], rname[100], offline[20], regid[200], hostname[200], port[100];
tname[299] = '\0', ername[49] = '\0', rname[99] = '\0', offline[19] = '\0';
regid[199]= '\0', hostname[199]='\0', port[99]='\0';
TMTrace(2, ("tm_process_req_requestregioninfo ENTRY.\n"));
HashMapArray* map = HbaseTM_process_request_regions_info();
TMTrace(2, ("tm_process_req_requestregioninfo HashMapArray call has finished.\n"));
pp_msg->response()->u.iv_hbaseregion_info.iv_count = 0;
for (int lv_inx = 0; ((lv_inx < TM_MAX_LIST_TRANS) && (lv_inx < lv_size)); lv_inx++)
{
TM_TX_Info *lp_current_tx = (TM_TX_Info *)lp_tx_list[lv_inx];
if (!lp_current_tx)
continue;
pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_status = lp_current_tx->tx_state();
u.lv_transid.iv_seq_num = lp_current_tx->seqnum();
pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_transid = u.lv_transid_int64;
pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_nid = lp_current_tx->node();
pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_seqnum = lp_current_tx->seqnum();
char* res2 = map->getRegionInfo(lp_current_tx->legacyTransid());
if(strlen(res2) == 0)
continue;
strncpy(tname, res2, sizeof(tname) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_tablename, tname, sizeof(tname)-1);
/*
char* res3 = map->getEncodedRegionName(lv_inx);
strncpy(ername, res3, sizeof(ername) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_enc_regionname, ername, sizeof(ername)-1);
char* res4 = map->getRegionName(lv_inx);
strncpy(rname, res4, sizeof(rname) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_regionname, res4, strlen(res4));
char* res5 = map->getRegionOfflineStatus(lv_inx);
strncpy(offline, res5, sizeof(offline) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_is_offline, offline, sizeof(offline)-1);
char* res6 = map->getRegionId(lv_inx);
strncpy(regid, res6, sizeof(regid) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_region_id, regid, sizeof(regid)-1);
char* res7 = map->getHostName(lv_inx);
strncpy(hostname, res7, sizeof(hostname) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_hostname, hostname, sizeof(hostname)-1);
char* res8 = map->getPort(lv_inx);
strncpy(port, res8, sizeof(port) -1);
strncpy(pp_msg->response()->u.iv_hbaseregion_info.iv_trans[lv_inx].iv_port, port, sizeof(port)-1);
*/
pp_msg->response()->u.iv_hbaseregion_info.iv_count++;
}
if (lp_tx_list)
delete []lp_tx_list;
pp_msg->reply(FEOK);
delete pp_msg;
delete map;
TMTrace(2, ("tm_process_req_requestregioninfo EXIT\n"));
}
// ----------------------------------------------------------------
// tm_process_req_GetNextSeqNum
// Purpose : Retrieve the next transaction sequence number
// block. This is used to implement local transactions
// in Trafodion.
// ----------------------------------------------------------------
void tm_process_req_GetNextSeqNum(CTmTxMessage * pp_msg)
{
TMTrace(2, ("tm_process_req_GetNextSeqNum ENTRY.\n"));
gv_tm_info.lock();
gv_tm_info.tm_new_seqNumBlock(pp_msg->request()->u.iv_GetNextSeqNum.iv_block_size,
&pp_msg->response()->u.iv_GetNextSeqNum.iv_seqNumBlock_start,
&pp_msg->response()->u.iv_GetNextSeqNum.iv_seqNumBlock_count);
gv_tm_info.unlock();
pp_msg->reply(FEOK);
TMTrace(2, ("tm_process_req_GetNextSeqNum EXIT returning Next seqNum start %d, block size %d\n",
pp_msg->response()->u.iv_GetNextSeqNum.iv_seqNumBlock_start,
pp_msg->response()->u.iv_GetNextSeqNum.iv_seqNumBlock_count));
delete pp_msg;
} // tm_process_req_GetNextSeqNum
// ----------------------------------------------------------------
// tm_process_req_begin
// Purpose : process message of type TM_MSG_TYPE_BEGINTRANSACTION
// ----------------------------------------------------------------
void tm_process_req_begin(CTmTxMessage * pp_msg)
{
short lv_error = FEOK;
TMTrace(2, ("tm_process_req_begin ENTRY\n"));
if ((gv_tm_info.state() != TM_STATE_UP) ||
(gv_tm_info.sys_recov_state() != TM_SYS_RECOV_STATE_END))
{
switch (gv_tm_info.state())
{
case TM_STATE_TX_DISABLED:
case TM_STATE_DRAIN:
lv_error = FEBEGINTRDISABLED;
break;
case TM_STATE_QUIESCE:
lv_error = FESERVICEDISABLED;
break;
default:
lv_error = FETMFNOTRUNNING;
}
TMTrace(1, ("tm_process_req_begin returning error %d.\n", lv_error));
pp_msg->reply(lv_error);
delete pp_msg;
return;
}
// Instantiate a new tx object.
TM_TX_Info *lp_tx = (TM_TX_Info *) gv_tm_info.new_tx(pp_msg->request()->u.iv_begin_trans.iv_nid,
pp_msg->request()->u.iv_begin_trans.iv_pid,
-1, -1,
(void* (*)(long int)) &TM_TX_Info::constructPoolElement);
// An error indicates we are handling our maximum number of concurrent
// transactions.
if (lp_tx == NULL)
{
// Removing this event for now as we keep hitting it and it's just announcing that
// we've reached the maximum transactions allowed per node.
//tm_log_event(DTM_TX_MAX_EXCEEDED, SQ_LOG_WARNING, "DTM_TX_MAX_EXCEEDED",
// -1, /*error_code*/
// -1, /*rmid*/
// gv_tm_info.nid(), /*dtmid*/
// -1, /*seq_num*/
// -1, /*msgid*/
// -1, /*xa_error*/
// gv_tm_info.transactionPool()->get_maxPoolSize(), /*pool_size*/
// gv_tm_info.transactionPool()->totalElements() /*pool_elems*/);
TMTrace(1, ("tm_process_req_begin, FETOOMANYTRANSBEGINS\n"));
pp_msg->reply(FETOOMANYTRANSBEGINS);
delete pp_msg;
return;
}
lp_tx->lock();
lp_tx->setAbortTimeout(pp_msg->request()->u.iv_begin_trans.iv_abort_timeout);
lp_tx->TT_flags(pp_msg->request()->u.iv_begin_trans.iv_transactiontype_bits);
// Start statistics counters
lp_tx->stats()->txnTotal()->start();
lp_tx->stats()->txnBegin()->start();
//M8 eliminate the association with the transaction as there is
// nothing more to do now that we don't support xa_start
lp_tx->req_begin(pp_msg);
lp_tx->unlock();
// Since we're not queuing requests, we can delete pp_req here itself.
delete pp_msg;
TMTrace(2, ("tm_process_req_begin, ID (%d,%d), creator (%d,%d) EXIT\n",
lp_tx->node(), lp_tx->seqnum(), lp_tx->ender_nid(), lp_tx->ender_pid()));
}
// --------------------------------------------------------------
// process_req_doomtx
// Purpose : process message of type TM_MSG_TYPE_DOOMTX
// DOOMTRANSACTION marks the transaction for rollback and
// replies immediately. Then in the background it drives
// rollback.
// or ABORTTRANSACTION.
// ---------------------------------------------------------------
void tm_process_req_doomtx(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_abort_trans.iv_transid;
TMTrace(2, ("tm_process_req_doomtx ID %d ENTRY\n", lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg))
return;
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
int16 lv_error = lp_tx->doom_txn();
pp_msg->reply(lv_error);
delete pp_msg;
TMTrace(2, ("tm_process_req_doomtx EXIT\n"));
} //process_req_doomtx
// --------------------------------------------------------------
// process_req_TSE_doomtx
// Purpose : process message of type TM_MSG_TYPE_TSE_DOOMTX
// This is different from an application doomtransaction because
// it drives an immediate rollback. This is necessary because
// the TSE might be dooming the transaction because we have hit
// an audit threshold and can't allow the transaction to continue.
// ---------------------------------------------------------------
void tm_process_req_TSE_doomtx(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_abort_trans.iv_transid;
TMTrace(2, ("tm_process_req_TSE_doomtx, ID %d ENTRY\n", lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg, TX_UNBLOCKED))
return;
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
if (lp_tx->isAborting())
{
TMTrace(1, ("tm_process_req_TSE_doomtx, already doomed.\n"));
pp_msg->reply(FEOK);
delete pp_msg;
}
else
{
int16 lv_error = lp_tx->doom_txn();
pp_msg->reply(lv_error);
lp_tx->queueToTransaction(lp_transid, pp_msg);
}
TMTrace(2, ("tm_process_req_TSE_doomtx EXIT\n"));
}
// --------------------------------------------------------------
// process_req_wait_tmup
// Purpose : Wait until the TM is up, and only then reply.
// This can be used by an application to wait for DTM to be ready
// to process transactions.
// ---------------------------------------------------------------
void tm_process_req_wait_tmup(CTmTxMessage * pp_msg)
{
TMTrace(2, ("tm_process_req_wait_tmup ENTRY\n"));
if ((gv_tm_info.state() == TM_STATE_UP) &&
(gv_tm_info.sys_recov_state() == TM_SYS_RECOV_STATE_END))
{
TMTrace(3, ("tm_process_req_wait_tmup : TM up, replying immediately.\n"));
pp_msg->reply(FEOK);
delete pp_msg;
}
else
{
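      // No reply yet: the message is parked on the TMUP wait list and is
      // answered later, once the TM reaches the up state.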
TMTrace(3, ("tm_process_req_wait_tmup : Adding caller msgid(%d) to TMUP_Wait list.\n",
pp_msg->msgid()));
gv_tm_info.TMUP_wait_list()->push(pp_msg);
}
TMTrace(2, ("tm_process_req_wait_tmup EXIT\n"));
} //process_req_wait_tmup
// --------------------------------------------------------------
// tm_process_req_end
// Purpose : process message of type TM_MSG_TYPE_ENDTRANSACTION
// ---------------------------------------------------------------
void tm_process_req_end(CTmTxMessage * pp_msg)
{
TM_Txid_Internal * lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_end_trans.iv_transid;
TMTrace(1, ("tm_process_req_end, ID %d ENTRY\n", lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg))
return;
TM_TX_Info *lp_tx = (TM_TX_Info*) gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
lp_tx->stats()->txnCommit()->start();
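   // Single-threaded mode drives the end (commit) and forget phases to
   // completion inline; otherwise the request is queued to the transaction's
   // own thread.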
if (!gv_tm_info.multithreaded()) {
lp_tx->req_end(pp_msg);
lp_tx->req_forget(pp_msg);
gv_tm_info.cleanup(lp_tx);
delete pp_msg;
}
else
lp_tx->queueToTransaction(lp_transid, pp_msg);
TMTrace(2, ("tm_process_req_end, ID %d EXIT\n", lp_transid->iv_seq_num));
}
// ------------------------------------------------------------------
// tm_process_req_join_trans
// Purpose : process message of type TM_MSG_TYPE_JOINTRANSACTION
// ------------------------------------------------------------------
void tm_process_req_join_trans(CTmTxMessage * pp_msg)
{
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_join_trans.iv_transid;
TMTrace(2, ("tm_process_req_join_trans, ID %d, ENTRY\n",
lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg, TX_UNBLOCKED))
return;
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
// Call join in-line in main thread
lp_tx->req_join(pp_msg);
// Since we don't queue join requests, we can delete pp_req here itself.
delete pp_msg;
TMTrace(2, ("tm_process_req_join_trans EXIT\n"));
}
// -----------------------------------------------------------------
// tm_process_req_list
// Purpose : Process a list transactions request.
// ----------------------------------------------------------------
void tm_process_req_list(CTmTxMessage *pp_msg)
{
int64 lv_size = 0;
void **lp_tx_list = gv_tm_info.get_all_txs (&lv_size);
union
{
int64 lv_transid_int64;
TM_Txid_legacy lv_transid;
} u;
TMTrace(2, ("tm_process_req_list ENTRY.\n"));
pp_msg->response()->u.iv_list_trans.iv_count = 0;
for (int lv_inx = 0; ((lv_inx < TM_MAX_LIST_TRANS) && (lv_inx < lv_size)); lv_inx++)
{
TM_TX_Info *lp_current_tx = (TM_TX_Info *)lp_tx_list[lv_inx];
if (!lp_current_tx)
break;
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_status = lp_current_tx->tx_state();
u.lv_transid.iv_seq_num = lp_current_tx->seqnum();
u.lv_transid.iv_node = lp_current_tx->node();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_transid = u.lv_transid_int64;
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_nid = lp_current_tx->node();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_seqnum = lp_current_tx->seqnum();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_tag = lp_current_tx->tag();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_owner_nid = lp_current_tx->ender_nid();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_owner_pid = lp_current_tx->ender_pid();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_event_count = lp_current_tx->eventQ()->size();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_pendingRequest_count = lp_current_tx->PendingRequestQ()->size();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_num_active_partic = lp_current_tx->num_active_partic();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_num_partic_RMs = lp_current_tx->get_TSEBranchesParticCount();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_XARM_branch = false; //TODO
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_transactionBusy = lp_current_tx->transactionBusy();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_mark_for_rollback = lp_current_tx->mark_for_rollback();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_tm_aborted = (lp_current_tx->tm_aborted()|lp_current_tx->tse_aborted());
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_read_only = lp_current_tx->read_only();
pp_msg->response()->u.iv_list_trans.iv_trans[lv_inx].iv_recovering = lp_current_tx->recovering();
pp_msg->response()->u.iv_list_trans.iv_count++;
}
if (lp_tx_list)
delete []lp_tx_list;
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_list EXIT.\n"));
} //tm_process_req_list
//----------------------------------------------------------------
// tm_process_req_status_all_transmgmt
// Purpose : Process status of all transactions of type
// TM_MSG_TYPE_STATUSALLTRANSMGT
// ----------------------------------------------------------------
void tm_process_req_status_all_transmgmt(CTmTxMessage *pp_msg)
{
int64 lv_size = 0;
void **lp_tx_list = gv_tm_info.get_all_txs (&lv_size);
union
{
int64 lv_transid_int64;
TM_Txid_legacy lv_transid;
} u;
TMTrace(2, ("tm_process_req_status_all_transmgmt ENTRY.\n"));
pp_msg->response()->u.iv_status_alltrans.iv_count = 0;
for (int lv_inx = 0; ((lv_inx < TM_MAX_LIST_TRANS) && (lv_inx < lv_size)); lv_inx++)
{
TM_TX_Info *lp_current_tx = (TM_TX_Info *)lp_tx_list[lv_inx];
if (!lp_current_tx)
break;
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_status = lp_current_tx->tx_state();
u.lv_transid.iv_seq_num = lp_current_tx->seqnum();
u.lv_transid.iv_node = lp_current_tx->node();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_transid = u.lv_transid_int64;
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_timestamp = lp_current_tx->timestamp();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_nid = lp_current_tx->node();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_seqnum = lp_current_tx->seqnum();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_tag = lp_current_tx->tag();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_owner_nid = lp_current_tx->ender_nid();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_owner_pid = lp_current_tx->ender_pid();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_event_count = lp_current_tx->eventQ()->size();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_pendingRequest_count = lp_current_tx->PendingRequestQ()->size();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_num_active_partic = lp_current_tx->num_active_partic();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_num_partic_RMs = lp_current_tx->get_TSEBranchesParticCount();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_num_unresolved_RMs = lp_current_tx->get_TSEBranchesUnresolvedCount();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_XARM_branch = false; //TODO
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_transactionBusy = lp_current_tx->transactionBusy();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_mark_for_rollback = lp_current_tx->mark_for_rollback();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_tm_aborted = (lp_current_tx->tm_aborted()|lp_current_tx->tse_aborted());
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_read_only = lp_current_tx->read_only();
pp_msg->response()->u.iv_status_alltrans.iv_trans[lv_inx].iv_recovering = lp_current_tx->recovering();
pp_msg->response()->u.iv_status_alltrans.iv_count++;
}
if (lp_tx_list)
delete []lp_tx_list;
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_status_all_transmgmt EXIT.\n"));
}// tm_process_req_status_all_transmgmt
// -----------------------------------------------------------------
// tm_process_req_tmstats
// Purpose : process message of type TM_MSG_TYPE_TMSTATS to
// list TM statistics.
// ----------------------------------------------------------------
void tm_process_req_tmstats(CTmTxMessage *pp_msg)
{
TMTrace(2, ("tm_process_req_tmstats ENTRY.\n"));
gv_tm_info.stats()->readStats(&pp_msg->response()->u.iv_tmstats.iv_stats);
if (pp_msg->request()->u.iv_tmstats.iv_reset) {
gv_tm_info.clearCounts();
gv_tm_info.stats()->clearCounters();
}
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_tmstats EXIT.\n"));
} //tm_process_req_tmstats
// -----------------------------------------------------------------
// tm_process_req_attachrm
// Purpose : process message of type TM_MSG_TYPE_ATTACHRM to
// return the status of this TM.
// ----------------------------------------------------------------
void tm_process_req_attachrm(CTmTxMessage *pp_msg)
{
TMTrace(2, ("tm_process_req_attachrm ENTRY fpr %s.\n", pp_msg->request()->u.iv_attachrm.ia_rmname));
gv_tm_info.addTimerEvent(pp_msg, 0 /*execute now*/);
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_attachrm EXIT.\n"));
}
// -----------------------------------------------------------------
// tm_process_req_statustm
// Purpose : process message of type TM_MSG_TYPE_STATUSTM to
// return the status of this TM.
// ----------------------------------------------------------------
void tm_process_req_statustm(CTmTxMessage *pp_msg)
{
RM_Info_TSEBranch *lp_rm;
TMTrace(2, ("tm_process_req_statustm ENTRY.\n"));
pp_msg->response()->u.iv_statustm.iv_status.iv_node = gv_tm_info.nid();
pp_msg->response()->u.iv_statustm.iv_status.iv_isLeadTM = gv_tm_info.lead_tm();
pp_msg->response()->u.iv_statustm.iv_status.iv_state = gv_tm_info.state();
pp_msg->response()->u.iv_statustm.iv_status.iv_sys_recovery_state = gv_tm_info.sys_recov_state();
pp_msg->response()->u.iv_statustm.iv_status.iv_shutdown_level = gv_tm_info.shutdown_level();
pp_msg->response()->u.iv_statustm.iv_status.iv_incarnation_num = gv_tm_info.incarnation_num();
pp_msg->response()->u.iv_statustm.iv_status.iv_number_active_txns = gv_tm_info.num_active_txs();
// Pick up any queued indoubt transactions
if (gv_tm_info.ClusterRecov())
pp_msg->response()->u.iv_statustm.iv_status.iv_number_active_txns += gv_tm_info.ClusterRecov()->txnStateList()->size();
pp_msg->response()->u.iv_statustm.iv_status.iv_is_isolated = gv_tm_info.leadTM_isolated();
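   // When only the first TSE slot has been used, report it only if it is
   // actually in use; otherwise report every slot up to the highest index used.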
if (gv_RMs.TSE()->return_highest_index_used() == 0) {
lp_rm = gv_RMs.TSE()->return_slot_by_index(0);
if(lp_rm->in_use()) {
pp_msg->response()->u.iv_statustm.iv_status.iv_rm_count = 1;
lp_rm->copyto(&pp_msg->response()->u.iv_statustm.iv_status.ia_rminfo[0]);
}
else {
pp_msg->response()->u.iv_statustm.iv_status.iv_rm_count = 0;
}
}
else {
pp_msg->response()->u.iv_statustm.iv_status.iv_rm_count = gv_RMs.TSE()->return_highest_index_used() + 1;
for (int i=0; i<=gv_RMs.TSE()->return_highest_index_used(); i++)
{
lp_rm = gv_RMs.TSE()->return_slot_by_index(i);
lp_rm->copyto(&pp_msg->response()->u.iv_statustm.iv_status.ia_rminfo[i]);
}
}
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_statustm EXIT.\n"));
} //tm_process_req_statustm
// -----------------------------------------------------------------
// tm_process_req_status_transmgmt
// Purpose : process message of type TM_MSG_TYPE_STATUS_TRANSMGMT to
// return the status of the transaction.
// ----------------------------------------------------------------
void tm_process_req_status_transmgmt(CTmTxMessage *pp_msg)
{
TMTrace(2, ("tm_process_req_status_transmgmt ENTRY.\n"));
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_status_transm.iv_transid;
TM_Transid lv_transid(*lp_transid);
//should already be sent to the correct TM
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
if(!lp_tx) {
pp_msg->reply(FENOTRANSID);
delete pp_msg;
return;
}
TM_Transid lv_fulltransid(*(lp_tx->transid()));
lv_fulltransid.set_external_data_type(&pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_transid);
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_status = lp_tx->tx_state();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_nid = lp_tx->node();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_seqnum = lp_tx->seqnum();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_incarnation_num = lv_transid.get_incarnation_num();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_tx_flags = lv_transid.get_tx_flags();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_tt_flags = lp_tx->TT_flags();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_owner_nid = lp_tx->ender_nid();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_owner_pid = lp_tx->ender_pid();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_event_count = lp_tx->eventQ()->size();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_pendingRequest_count = lp_tx->PendingRequestQ()->size();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_num_active_partic = lp_tx->num_active_partic();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_num_partic_RMs = lp_tx->get_TSEBranchesParticCount();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_XARM_branch = false;
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_transactionBusy = lp_tx->transactionBusy();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_mark_for_rollback = lp_tx->mark_for_rollback();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_tm_aborted = lp_tx->tm_aborted();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_abort_flags = lp_tx->abort_flags();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_read_only = lp_tx->read_only();
pp_msg->response()->u.iv_status_transm.iv_status_trans.iv_recovering = lp_tx->recovering();
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_status_transmgmt EXIT.\n"));
} //tm_process_req_status_transmgmt
// -----------------------------------------------------------------
// tm_process_req_status_gettransinfo
// Purpose : process message of type TM_MSG_TYPE_GETTRANSINFO to
// return the trans ID information
// ----------------------------------------------------------------
void tm_process_req_status_gettransinfo(CTmTxMessage *pp_msg)
{
TMTrace(2, ("tm_process_req_status_gettransinfo ENTRY.\n"));
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_status_transm.iv_transid;
TM_Transid lv_transid(*lp_transid);
union
{
int64 lv_tt_flags_int64;
TM_TT_Flags lv_tt_flags;
} u;
//should already be sent to the correct TM
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
if(!lp_tx) {
pp_msg->reply(FENOTRANSID);
delete pp_msg;
return;
}
TM_Transid lv_fulltransid(*(lp_tx->transid()));
pp_msg->response()->u.iv_gettransinfo.iv_seqnum = lp_tx->seqnum();
pp_msg->response()->u.iv_gettransinfo.iv_node = lp_tx->node();
pp_msg->response()->u.iv_gettransinfo.iv_incarnation_num = lv_fulltransid.get_incarnation_num();
pp_msg->response()->u.iv_gettransinfo.iv_tx_flags = lv_fulltransid.get_tx_flags();
u.lv_tt_flags_int64 = lp_tx->TT_flags();
pp_msg->response()->u.iv_gettransinfo.iv_tt_flags = u.lv_tt_flags;
pp_msg->response()->u.iv_gettransinfo.iv_version = lv_fulltransid.get_version();
pp_msg->response()->u.iv_gettransinfo.iv_checksum = lv_fulltransid.get_check_sum();
pp_msg->response()->u.iv_gettransinfo.iv_timestamp = lv_fulltransid.get_timestamp();
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_status_gettransinfo EXIT.\n"));
} //tm_process_req_gettransinfo
// -----------------------------------------------------------------
// tm_process_req_leadtm
// Purpose : process message of type TM_MSG_TYPE_LEADTM to
// return the current Lead TMs nid.
// ----------------------------------------------------------------
void tm_process_req_leadtm(CTmTxMessage *pp_msg)
{
TMTrace(2, ("tm_process_req_leadtm ENTRY.\n"));
pp_msg->response()->u.iv_leadtm.iv_node = gv_tm_info.lead_tm_nid();
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_leadtm EXIT.\n"));
} //tm_process_req_leadtm
// -----------------------------------------------------------------
// tm_process_req_enabletrans
// Purpose : process message of type TM_MSG_TYPE_ENABLETRANS to
// enable transaction processing in DTM.
// This can only be executed by the Lead TM. Non-lead TMs will
// return FEDEVDOWN.
// ----------------------------------------------------------------
void tm_process_req_enabletrans(CTmTxMessage *pp_msg)
{
short lv_error = FEOK;
TMTrace(2, ("tm_process_req_enabletrans ENTRY.\n"));
if (!gv_tm_info.lead_tm())
lv_error = FEDEVDOWN;
else
switch (gv_tm_info.state())
{
case TM_STATE_QUIESCE:
case TM_STATE_DRAIN:
lv_error = FEINVALOP;
break;
case TM_STATE_TX_DISABLED:
default:
// Queue the enabletransaction to the timer thread for execution.
gv_tm_info.addTimerEvent(pp_msg, 0 /*execute now*/);
}
// Reply immediately and leave the enable to run in the background.
pp_msg->reply(lv_error);
delete pp_msg;
TMTrace(2, ("tm_process_req_enabletrans EXIT.\n"));
} //tm_process_req_enabletrans
// -----------------------------------------------------------------
// tm_process_req_disabletrans
// Purpose : process message of type TM_MSG_TYPE_DISABLETRANS to
// disable transaction processing in DTM.
// This can only be executed by the Lead TM. Non-lead TMs will
// return FEDEVDOWN.
// ----------------------------------------------------------------
void tm_process_req_disabletrans(CTmTxMessage *pp_msg)
{
short lv_error = FEOK;
char lv_levelStr[20],
*lp_levelStr = (char *) &lv_levelStr;
switch (pp_msg->request()->u.iv_disabletrans.iv_shutdown_level)
{
case TM_DISABLE_SHUTDOWN_IMMEDIATE:
strcpy(lp_levelStr, "Immediate");
break;
case TM_DISABLE_SHUTDOWN_NORMAL:
strcpy(lp_levelStr, "Normal");
break;
default:
strcpy(lp_levelStr, "** Invalid **");
}
TMTrace(2, ("tm_process_req_disabletrans ENTRY, level %s.\n", lp_levelStr));
if (!gv_tm_info.lead_tm())
lv_error = FEDEVDOWN;
else
if (gv_tm_info.state() == TM_STATE_UP ||
gv_tm_info.state() == TM_STATE_TX_DISABLED ||
(gv_tm_info.state() == TM_STATE_TX_DISABLED_SHUTDOWN_PHASE1 &&
pp_msg->request()->u.iv_disabletrans.iv_shutdown_level == TM_DISABLE_SHUTDOWN_IMMEDIATE) ||
gv_tm_info.state() == TM_STATE_DRAIN)
{
// For disabletrans normal shutdown, only queue the disabletransaction if all TMs have recovered.
if (pp_msg->request()->u.iv_disabletrans.iv_shutdown_level != TM_DISABLE_SHUTDOWN_NORMAL ||
gv_tm_info.all_tms_recovered())
gv_tm_info.addTimerEvent(pp_msg, 0 /*execute now*/);
else
lv_error = FERETRY;
}
else
lv_error = FEINVALOP;
// Reply immediately and leave the disable to run in the background.
pp_msg->reply(lv_error);
delete pp_msg;
TMTrace(2, ("tm_process_req_disabletrans EXIT, replied with error %d.\n", lv_error));
} //tm_process_req_disabletrans
// -----------------------------------------------------------------
// tm_process_req_draintrans
// Purpose : process message of type TM_MSG_TYPE_DRAINTRANS to
// drain transaction processing in this TM.
// This can be executed in any TM. It is used to allow transactions
// to complete before a planned node outage.
// Immediate means abort all active transactions and overrides a
// prior drain
// ----------------------------------------------------------------
void tm_process_req_draintrans(CTmTxMessage *pp_msg)
{
short lv_error = FEOK;
TMTrace(2, ("tm_process_req_draintrans ENTRY immediate=%d.\n",
pp_msg->request()->u.iv_draintrans.iv_immediate));
if (gv_tm_info.state() == TM_STATE_UP ||
gv_tm_info.state() == TM_STATE_DRAIN)
gv_tm_info.drainTrans(pp_msg);
else
lv_error = FEINVALOP;
// Reply immediately and leave the drain to run in the background.
pp_msg->reply(lv_error);
delete pp_msg;
TMTrace(2, ("tm_process_req_draintrans EXIT error=%d.\n", lv_error));
} //tm_process_req_draintrans
// -----------------------------------------------------------------
// tm_process_req_status
// Purpose : process message of type TM_MSG_TYPE_STATUSTRANSACTION
// ----------------------------------------------------------------
void tm_process_req_status(CTmTxMessage * pp_msg)
{
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_status_trans.iv_transid;
TMTrace(2, ("tm_process_req_status, ID %d, ENTRY\n",
lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg))
return;
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
pp_msg->response()->u.iv_status_trans.iv_status = TM_TX_STATE_NOTX;
if (!tm_notx_check(lp_tx, pp_msg))
return;
// Handle status request in main thread to avoid status
// getting queued behind other requests.
lp_tx->req_status(pp_msg);
// Since we don't queue status requests, we can delete pp_msg here itself.
delete pp_msg;
TMTrace(2, ("tm_process_req_status EXIT\n"));
}
// ---------------------------------------------------------------
// process_req_suspend_trans
// Purpose : process request of type TM_MSG_TYPE_SUSPENDTRANSACTION
// ---------------------------------------------------------------
void tm_process_req_suspend_trans (CTmTxMessage * pp_msg)
{
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_suspend_trans.iv_transid;
TMTrace(2, ("tm_process_req_suspend_trans, ID %d, ENTRY\n",
lp_transid->iv_seq_num));
if (!tm_up_check(pp_msg))
return;
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
if (!tm_notx_check(lp_tx, pp_msg))
return;
// Call suspend in-line in main thread
lp_tx->req_suspend(pp_msg);
// Since we don't queue suspend requests, we can delete pp_msg here itself.
delete pp_msg;
TMTrace(2, ("tm_process_req_suspend_trans EXIT\n"));
}
// -----------------------------------------------------------------
// tm_process_req_broadcast
// Purpose - process a broadcast for sync data
// ----------------------------------------------------------------
void tm_process_req_broadcast (BMS_SRE *pp_sre,
Tm_Broadcast_Req_Type *pp_req, Tm_Broadcast_Rsp_Type *pp_rsp)
{
TMTrace(2, ("tm_process_req_broadcast for node %d ENTRY\n",
pp_req->iv_node));
ushort lv_len = sizeof(Tm_Broadcast_Rsp_Type);
gv_tm_info.unpack_sync_buffer (pp_req, pp_req->iv_node);
if (pp_req->iv_state_up) // last one, can be considered up
{
gv_tm_info.can_takeover(true);
gv_tm_info.tm_up(); // up for processing
}
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) pp_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
TMTrace(2, ("tm_process_req_broadcast EXIT\n"));
}
// --------------------------------------------------------------------
// ax_* methods
// --------------------------------------------------------------------
// --------------------------------------------------------------------
// tm_process_req_ax_reg
// Purpose : process message of type TM_MSG_TYPE_AX_REG
// --------------------------------------------------------------------
void tm_process_req_ax_reg (CTmTxMessage * pp_msg)
{
short lv_error = FEOK;
int lv_ptype = -1;
int lv_nid = -1;
int lv_pid = -1;
int lv_seq_num = 0;
int32 lv_rmid = pp_msg->request()->u.iv_ax_reg.iv_rmid;
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)
&pp_msg->request()->u.iv_ax_reg.iv_txid;
TMTrace(2, ("tm_process_req_ax_reg, ID (%d,%d), ENTRY msgid %d\n",
lp_transid->iv_node, lp_transid->iv_seq_num, pp_msg->msgid()));
// Removed check here because ax_reg needs to work during system recovery in M6.
//if (!tm_up_check(pp_msg))
// return;
TM_TX_Info *lp_tx = (TM_TX_Info *)gv_tm_info.get_tx(lp_transid);
// sent to the wrong TM or this tx never existed or has been forgotten.
if (lp_tx == NULL)
{
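      // Return this TM's incarnation number and the lead TM's nid so the TSE
      // can redirect its ax_reg to the lead TM.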
pp_msg->response()->u.iv_ax_reg.iv_TM_incarnation_num = gv_tm_info.incarnation_num();
pp_msg->response()->u.iv_ax_reg.iv_LeadTM_nid = gv_tm_info.lead_tm_nid();
if (pp_msg->request()->u.iv_ax_reg.iv_flags & TM_TT_NO_UNDO)
lv_error = FEWRONGID;
else
lv_error = FEINVTRANSID;
TMTrace(3, ("tm_process_req_ax_reg, ID (%d,%d) from RM %d not found in transactionPool - "
"redirecting TSE to Lead TM, error %d.\n",
lp_transid->iv_node, lp_transid->iv_seq_num, lv_rmid, lv_error));
//tm_log_event(DTM_TM_NO_TRANS, SQ_LOG_WARNING, "DTM_TM_NO_TRANS",
// lv_error,lv_rmid,lp_transid->iv_node,lp_transid->iv_seq_num);
pp_msg->reply(lv_error);
delete pp_msg;
return;
}
lp_tx->stats()->ax_reg()->start();
// The TSE doesn't always know its rmid, so we can't rely on that.
   // Instead we look up the RM in our list.
if (lv_rmid == -1 || lv_rmid == 0)
{
lv_error = BMSG_GETREQINFO_(MSGINFO_PTYPE, pp_msg->msgid(), &lv_ptype);
if (!lv_error && lv_ptype == MS_ProcessType_TSE)
{
lv_error = BMSG_GETREQINFO_(MSGINFO_NID, pp_msg->msgid(), &lv_nid);
if (!lv_error)
lv_error = BMSG_GETREQINFO_(MSGINFO_PID, pp_msg->msgid(), &lv_pid);
if (lv_error)
{
TMTrace(1, ("tm_process_req_ax_reg, Error %d retrieving nid "
"(%d) and pid (%d) for TSE. ax_reg ignored.\n",
lv_error, lv_nid, lv_pid));
tm_log_event(DTM_AX_REG_NID_PID_BAD, SQ_LOG_CRIT, "DTM_AX_REG_NID_PID_BAD",
lv_error, lv_rmid, -1, -1, pp_msg->msgid(), -1, -1, -1, -1, -1, -1,
-1, -1, -1, lv_pid, -1, NULL, lv_nid);
pp_msg->reply(FENOTFOUND);
delete pp_msg;
return;
}
lv_rmid = gv_RMs.TSE()->return_rmid(lv_nid, lv_pid);
if (lv_rmid == -1)
{
TMTrace(1, ("tm_process_req_ax_reg, RM not found in RM list. "
"ax_reg ignored.\n"));
tm_log_event(DTM_AX_REG_NOTFOUND, SQ_LOG_CRIT, "DTM_AX_REG_NOTFOUND",
-1, lv_rmid, -1, -1, pp_msg->msgid(), -1, -1, -1, -1, -1, -1,
-1, -1, -1, lv_pid, -1, NULL, lv_nid);
pp_msg->reply(FENOTFOUND);
delete pp_msg;
return;
}
else
TMTrace(3, ("tm_process_req_ax_reg, TSE ax_reg for rmid %d, TSE (%d, %d).\n",
lv_rmid, lv_nid, lv_pid));
}
else // Not TSE or error
{
if (!lv_error)
lv_error = BMSG_GETREQINFO_(MSGINFO_NID, pp_msg->msgid(), &lv_nid);
if (!lv_error)
lv_error = BMSG_GETREQINFO_(MSGINFO_PID, pp_msg->msgid(), &lv_pid);
if (lv_error)
{
TMTrace(1, ("tm_process_req_ax_reg, Error %d retrieving PTYPE (%d), "
"nid (%d) or pid (%d). ax_reg ignored!\n",
lv_error, lv_ptype, lv_nid, lv_pid));
tm_log_event(DTM_AX_REG_PTYPE_BAD, SQ_LOG_CRIT, "DTM_AX_REG_PTYPE_BAD",
lv_error,-1,-1,-1,pp_msg->msgid(),-1,-1,-1,-1,-1,-1,-1,-1,lv_pid,
lv_ptype,-1,NULL,lv_nid);
pp_msg->reply(FENOTFOUND);
delete pp_msg;
return;
}
else // Not an error - ax_reg from XARM library and should contain the rmid.
// but not yet implemented!
{
TMTrace(1, ("tm_process_req_ax_reg, Received unexpected ax_reg from non-TSE"
" process (%d, %d), PTYPE %d assuming this was an XARM request!?, ignored!\n",
lv_nid, lv_pid, lv_ptype));
tm_log_event(DTM_AX_REG_XARM_NOTSUPPORTED, SQ_LOG_CRIT, "DTM_AX_REG_XARM_NOTSUPPORTED",
-1,pp_msg->request()->u.iv_ax_reg.iv_rmid,-1,pp_msg->msgid(),
-1,-1,-1,-1,-1,-1,-1,-1,lv_pid,lv_ptype,0,lv_nid);
pp_msg->reply(FENOTFOUND);
delete pp_msg;
return;
}
}
}
// Save the rmid back in the message
pp_msg->request()->u.iv_ax_reg.iv_rmid = lv_rmid;
// Call directly in the main thread to improve performance.
//lp_tx->queueToTransaction(lp_transid, pp_msg);
lp_tx->req_ax_reg(pp_msg);
lv_seq_num = lp_transid->iv_seq_num;
delete pp_msg;
lp_tx->stats()->ax_reg()->stop();
TMTrace(2, ("tm_process_req_ax_reg, ID %d, EXIT\n", lv_seq_num));
} //tm_process_req_ax_reg
// --------------------------------------------------------------------
// tm_process_req_ax_unreg
// Purpose : process message of type TM_MSG_TYPE_AX_UNREG
// --------------------------------------------------------------------
void tm_process_req_ax_unreg (CTmTxMessage * pp_msg)
{
TMTrace(2, ("tm_process_req_ax_unreg ENTRY\n"));
// sorry, not implemented right now!
pp_msg->reply(FEOK);
delete pp_msg;
TMTrace(2, ("tm_process_req_ax_unreg EXIT\n"));
}
// ------------------------------------------------------------------
// callback methods and processing downline from callbacks
// ------------------------------------------------------------------
// ------------------------------------------------------------------
// tm_sync_cb
// Purpose : this method is registered with seabed and is used when
// a sync is received (Phase 1)
// ------------------------------------------------------------------
int32 tm_sync_cb (void *pp_data, int32 pv_len , int32 pv_handle)
{
Tm_Sync_Header *lp_hdr = (Tm_Sync_Header*)pp_data;
Tm_Sync_Data *lp_sync_data = new Tm_Sync_Data;
Tm_Sync_Data *lp_data = (Tm_Sync_Data *)pp_data;
pv_len = pv_len; // intel compiler warning 869
if (pp_data == NULL)
{
tm_log_event(DTM_SYNC_INVALID_DATA, SQ_LOG_CRIT, "DTM_SYNC_INVALID_DATA");
TMTrace(1, ("tm_sync_cb : data is invalid\n"));
abort ();
}
TMTrace(2, ("tm_sync_cb ENTRY : type %d\n", lp_hdr->iv_type));
// allow duplicates per Charles
Tm_Sync_Data *lp_existing_data = (Tm_Sync_Data *)gv_sync_map.get(pv_handle);
if (lp_existing_data != NULL)
{
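      // The sync data for this handle was already captured; discard the
      // duplicate copy.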
delete lp_sync_data;
return 0;
}
switch (lp_hdr->iv_type)
{
case TM_BEGIN_SYNC:
case TM_END_SYNC:
case TM_FORGET_SYNC:
{
#ifdef DEBUG_MODE
bool lv_test = false;
ms_getenv_bool("TM_TEST_SINGLE_FORCE_ABORT", &lv_test);
if (lv_test)
{
// sprintf(la_buf, "TM Test: Force Abort\n");
// tm_log_write(DTM_TM_TEST_FORCE_ABORT, SQ_LOG_CRIT, la_buf);
// TMTrace(1, ("tm_sync_cb - %s", la_buf));
abort ();
}
#endif
if (lp_data->u.iv_tx_data.iv_pid <= 0)
{
tm_log_event (DTM_SYNC_INVALID_PID, SQ_LOG_CRIT, "DTM_SYNC_INVALID_PID",
-1, /*error_code*/
-1, /*rmid*/
-1, /*dtmid*/
-1, /*seq_num*/
-1, /*msgid*/
-1, /*xa_error*/
-1, /*pool_size*/
-1, /*pool_elems*/
-1, /*msg_retries*/
-1, /*pool_high*/
-1, /*pool_low*/
-1, /*pool_max*/
-1, /*tx_state*/
lp_data->u.iv_tx_data.iv_pid); /*data */
TMTrace(1, ("tm_sync_cb - Invalid sync PID: %d\n", lp_data->u.iv_tx_data.iv_pid));
abort ();
}
if (lp_data->u.iv_tx_data.iv_transid.id[0] <= 0)
{
tm_log_event (DTM_SYNC_INVALID_TRANSID, SQ_LOG_CRIT, "DTM_SYNC_INVALID_TRANSID",
-1, /*error_code*/
-1, /*rmid*/
-1, /*dtmid*/
-1, /*seq_num*/
-1, /*msgid*/
-1, /*xa_error*/
-1, /*pool_size*/
-1, /*pool_elems*/
-1, /*msg_retries*/
-1, /*pool_high*/
-1, /*pool_low*/
-1, /*pool_max*/
-1, /*tx_state*/
-1, /*data */
-1, /*data1*/
-lp_data->u.iv_tx_data.iv_transid.id[0]);/*data2 */
TMTrace(1, ("tm_sync_cb - Invalid sync Trans ID: " PFLL "\n",
lp_data->u.iv_tx_data.iv_transid.id[0]));
abort ();
}
break;
}
case TM_UP:
{
break;
}
case TM_STATE_RESYNC:
{
TMTrace(3, ("tm_sync_cb - TM_STATE_RESYNC received \n"));
// nothing to validate since these are booleans and the node
// being recovered could be a -1
break;
}
case TM_RECOVERY_START:
case TM_RECOVERY_END:
{
if ((lp_data->u.iv_to_data.iv_my_node < 0) ||
(lp_data->u.iv_to_data.iv_my_node > MAX_NODES) ||
(lp_data->u.iv_to_data.iv_down_node < 0) ||
(lp_data->u.iv_to_data.iv_down_node > MAX_NODES))
{
tm_log_event(DTM_TM_NODE_OUTSIDE_RANGE, SQ_LOG_CRIT, "DTM_TM_NODE_OUTSIDE_RANGE");
TMTrace(1, ("tm_sync_cb - Received RECOVERY sync with node out of range.\n"));
abort ();
}
#ifdef DEBUG_MODE
bool lv_assert = false;
ms_getenv_bool("TM_TEST_AFTER_REC_START_SYNC_ASSERT", &lv_assert);
if (lv_assert == true)
{
// sprintf(la_buf, "TM Test: Rec start sync assert\n");
// tm_log_write(DTM_TM_TEST_REC_START_SYNC, SQ_LOG_CRIT, la_buf);
// TMTrace(1, ("tm_sync_cb - %s", la_buf));
abort ();
}
#endif
break;
}
case TM_LISTBUILT_SYNC:
{
if ((lp_data->u.iv_list_built.iv_down_node < 0) ||
(lp_data->u.iv_list_built.iv_down_node > MAX_NODES))
{
tm_log_event(DTM_TM_NODE_OUTSIDE_RANGE, SQ_LOG_CRIT, "DTM_TM_NODE_OUTSIDE_RANGE");
TMTrace(1, ("tm_sync_cb - Received TM_LISTBUILT_SYNC sync with node out of range\n"));
abort ();
}
TMTrace(1, ("tm_sync_cb - received TM_LISTBUILT_SYNC, verification successful\n"));
break;
}
case TM_PROCESS_RESTART:
break;
case TM_SYS_RECOV_START_SYNC:
case TM_SYS_RECOV_END_SYNC:
{
if ((lp_data->u.iv_sys_recov_data.iv_sys_recov_state > TM_SYS_RECOV_STATE_END) ||
(lp_data->u.iv_sys_recov_data.iv_sys_recov_lead_tm_node < 0) ||
(lp_data->u.iv_sys_recov_data.iv_sys_recov_lead_tm_node > MAX_NODES))
{
tm_log_event(DTM_TM_NODE_OUTSIDE_RANGE, SQ_LOG_CRIT, "DTM_TM_NODE_OUTSIDE_RANGE");
TMTrace(1, ("tm_sync_cb - Received RECOVERY sync with node out of range\n"));
abort ();
}
break;
}
default:
{
tm_log_event(DTM_TM_UNKNOWN_SYNC_TYPE, SQ_LOG_CRIT, "DTM_TM_UNKNOWN_SYNC_TYPE");
TMTrace(1, ("tm_sync_cb - Unknown sync header type received\n"));
abort ();
break;
}
};
memcpy (lp_sync_data, lp_data, sizeof (Tm_Sync_Data));
gv_sync_map.put(pv_handle, lp_sync_data);
TMTrace(2, ("tm_sync_cb EXIT : type %d\n", lp_hdr->iv_type));
return 0;
}
void tm_recipient_sync_commit (Tm_Sync_Data *pp_sync_data)
{
TMTrace(2, ("tm_recipient_sync_commit : ENTRY, type %d\n",
pp_sync_data->iv_hdr.iv_type));
switch (pp_sync_data->iv_hdr.iv_type)
{
case TM_BEGIN_SYNC:
{
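         // Record the transaction's sync data under the originating node and
         // bump the system-wide transaction count.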
gv_tm_info.add_sync_data(pp_sync_data->iv_hdr.iv_nid,
&pp_sync_data->u.iv_tx_data);
gv_system_tx_count++;
break;
}
case TM_END_SYNC:
{
Tm_Tx_Sync_Data *lp_data = gv_tm_info.get_sync_data(
&pp_sync_data->u.iv_tx_data);
// Add sync data to the sync data list if
// it isn't already in the list for the sending node.
if (lp_data == NULL)
gv_tm_info.add_sync_data(pp_sync_data->iv_hdr.iv_nid,
&pp_sync_data->u.iv_tx_data);
else
lp_data->iv_state = pp_sync_data->u.iv_tx_data.iv_state;
break;
}
case TM_FORGET_SYNC:
{
gv_tm_info.remove_sync_data(&pp_sync_data->u.iv_tx_data);
break;
}
case TM_STATE_RESYNC:
{
TMTrace(3, ("tm_recipient_sync_commit - TM_STATE_RESYNC sync received.\n"));
gv_tm_info.node_being_recovered(pp_sync_data->u.iv_state_resync.iv_index,
pp_sync_data->u.iv_state_resync.iv_node_being_recovered);
gv_tm_info.down_without_sync( pp_sync_data->u.iv_state_resync.iv_index,
pp_sync_data->u.iv_state_resync.iv_down_without_sync);
gv_tm_info.recovery_list_built( pp_sync_data->u.iv_state_resync.iv_index,
pp_sync_data->u.iv_state_resync.iv_list_built);
break;
}
case TM_RECOVERY_START:
{
tm_log_event(DTM_TM_START_NODE_RECOVERY, SQ_LOG_INFO, "DTM_TM_START_NODE_RECOVERY");
TMTrace(1, ("tm_recipient_sync_commit - RECOVERY START sync received.\n"));
// The lead TM can not receive this sync. Issue an event and shutdown the cluster
if (gv_tm_info.lead_tm())
{
tm_log_event(DTM_LEAD_TM_TM_SYNC_UNEXPECTED, SQ_LOG_CRIT, "DTM_LEAD_TM_TM_SYNC_UNEXPECTED", FEDUP);
TMTrace(1, ("tm_recipient_sync_recipient : Error TM_RECOVERY_START sync received by Lead TM.\n"));
gv_tm_info.error_shutdown_abrupt(FEDUP);
}
if (pp_sync_data->u.iv_to_data.iv_down_node == -1)
{
tm_log_event(DTM_TM_START_NODE_RECOVERY, SQ_LOG_CRIT, "DTM_TM_START_NODE_RECOVERY");
TMTrace(1, ("tm_recipient_sync_commit - Invalid node id received for a RECOVERY START sync\n"));
abort ();
}
gv_tm_info.node_being_recovered (
pp_sync_data->u.iv_to_data.iv_down_node,
pp_sync_data->u.iv_to_data.iv_my_node);
gv_tm_info.down_without_sync(pp_sync_data->u.iv_to_data.iv_down_node, false);
TMTrace(3, ("tm_recipient_sync_commit - setting down_without_sync to FALSE for node %d\n",
pp_sync_data->u.iv_to_data.iv_down_node));
gv_tm_info.schedule_init_and_recover_rms();
break;
}
case TM_RECOVERY_END:
{
tm_log_event(DTM_TM_END_NODE_RECOVERY, SQ_LOG_INFO, "DTM_TM_END_NODE_RECOVERY");
TMTrace(1, ("tm_recipient_sync_commit - RECOVERY END sync received.\n"));
if (pp_sync_data->u.iv_to_data.iv_down_node == -1)
{
tm_log_event(DTM_TM_END_NODE_RECOVERY, SQ_LOG_CRIT, "DTM_TM_END_NODE_RECOVERY");
TMTrace(1, ("tm_recipient_sync_commit - Invalid node id received for a RECOVERY END sync.\n"));
abort ();
}
//reset
TMTrace(3, ("tm_recipient_sync_commit setting recovery_list_built to FALSE for Node "
" %d\n",pp_sync_data->u.iv_to_data.iv_down_node));
// reset list built flag for recovery
gv_tm_info.recovery_list_built (pp_sync_data->u.iv_to_data.iv_down_node, false);
gv_tm_info.node_being_recovered (pp_sync_data->u.iv_to_data.iv_down_node, -1);
gv_tm_info.set_sys_recov_status(TM_SYS_RECOV_STATE_END,
pp_sync_data->u.iv_to_data.iv_my_node); //my node sb the lead tm
gv_tm_info.tm_up();
break;
}
case TM_LISTBUILT_SYNC:
{
tm_log_event(DTM_TM_LISTBUILT_SYNC, SQ_LOG_INFO, "DTM_TM_LISTBUILT_SYNC");
TMTrace(3, ("tm_recipient_sync_commit (TM_LISTBUILT_SYNC) setting recovery_list_built "
" to TRUE for Node %d\n",pp_sync_data->u.iv_list_built.iv_down_node));
gv_tm_info.recovery_list_built (pp_sync_data->u.iv_list_built.iv_down_node, true);
break;
}
case TM_PROCESS_RESTART:
tm_log_event(DTM_TM_PROCESS_RESTART_SYNC, SQ_LOG_INFO, "DTM_TM_PROCESS_RESTART_SYNC");
TMTrace(1, ("tm_recipient_sync_commit - process restart sync received.\n"));
gv_tm_info.restarting_tm(pp_sync_data->u.iv_proc_restart_data.iv_proc_restart_node);
gv_tm_info.schedule_init_and_recover_rms();
break;
case TM_SYS_RECOV_START_SYNC:
case TM_SYS_RECOV_END_SYNC:
{
gv_tm_info.set_sys_recov_status(pp_sync_data->u.iv_sys_recov_data.iv_sys_recov_state,
pp_sync_data->u.iv_sys_recov_data.iv_sys_recov_lead_tm_node);
break;
}
case TM_UP:
{
// this is received upon startup after recovery, so its a fresh system
gv_tm_info.can_takeover(true);
gv_tm_info.tm_up();
break;
}
default:
{
tm_log_event(DTM_TM_UNKNOWN_SYNC_TYPE, SQ_LOG_CRIT, "DTM_TM_UNKNOWN_SYNC_TYPE");
TMTrace(1, ("tm_recipient_sync_commit : invalid data\n"));
abort ();
break;
}
};
TMTrace(2, ("tm_recipient_sync_commit EXIT \n"));
}
// --------------------------------------------------------------------
// tm_get_leader_info
// Purpose : Get the new tm leader.
// --------------------------------------------------------------------
void tm_get_leader_info()
{
// Nothing to do here if we are already the Lead TM.
if (gv_tm_info.lead_tm() == true)
return;
int32 lv_leader_nid, lv_leader_pid;
char la_leader_name[BUFSIZ];
int32 lv_old_leader_nid = gv_tm_info.lead_tm_nid();
int lv_leader_error = msg_mon_tm_leader_set(&lv_leader_nid,
&lv_leader_pid, la_leader_name);
// ignore error as it simply indicates that we are not the leader.
if (lv_leader_error)
{
TMTrace(3, ("tm_get_leader_info : Error %d returned by "
"msg_mon_tm_leader_set - $TM%d is not the Lead. Error ignored.\n",
lv_leader_error, gv_tm_info.nid()));
}
gv_tm_info.lead_tm_nid(lv_leader_nid);
if (lv_leader_nid != lv_old_leader_nid)
{
tm_log_event (DTM_TM_LEADTM_SET, SQ_LOG_INFO , "DTM_TM_LEADTM_SET",
-1, /*error_code*/
-1, /*rmid*/
gv_tm_info.nid(), /*dtmid*/
-1, /*seq_num*/
-1, /*msgid*/
-1, /*xa_error*/
-1, /*pool_size*/
-1, /*pool_elems*/
-1, /*msg_retries*/
-1, /*pool_high*/
-1, /*pool_low*/
-1, /*pool_max*/
-1, /*tx_state*/
lv_old_leader_nid, /*data */
-1, /*data1*/
-1,/*data2 */
NULL, /*string2*/
lv_leader_nid /*node*/);
TMTrace(3, ("tm_get_leader_info : Node %d is new Lead DTM.\n", lv_leader_nid));
if (lv_leader_nid == gv_tm_info.nid())
{
// modify the wait interval now for this lead dtm
gv_wait_interval = LEAD_DTM_WAKEUP_INTERVAL/10;
gv_tm_info.lead_tm(true);
gv_tm_info.lead_tm_takeover(true);
gv_tm_info.open_other_tms();
// Add a Checkpoint event to drive cp processing
// gv_tm_info.tmTimer()->cancelControlpointEvent();
// gv_tm_info.tmTimer()->addControlpointEvent(gv_tm_info.cp_interval());
// Add a stats event
gv_tm_info.tmTimer()->cancelStatsEvent();
gv_tm_info.tmTimer()->addStatsEvent(gv_tm_info.stats_interval());
}
}
else
{
TMTrace(3, ("tm_get_leader_info : Lead DTM did not change. Node %d is still Lead DTM.\n",
lv_leader_nid));
}
} //tm_get_leader_info
//---------------------------------------------------------------------
// tm_originating_sync_commit
// Purpose - helper method to process the phase2 sync from the
// originating TM
// --------------------------------------------------------------------
void tm_originating_sync_commit (int32 pv_tag)
{
CTmTxBase *lp_tx = NULL;
Tm_Sync_Type_Transid *lp_data = gv_tm_info.get_sync_otag(pv_tag);
CTmTxMessage *lp_msg;
// assert (lp_data != NULL);
if (lp_data == NULL)
{
TMTrace(1, ("tm_originating_sync_commit : ERROR tag %d not found in sync tags, sync ignored.\n",
pv_tag));
return;
}
TMTrace(2, ("tm_originating_sync_commit ENTRY, ID %d, tag %d, type %d.\n",
lp_data->u.iv_seqnum, pv_tag, lp_data->iv_sync_type));
switch(lp_data->iv_sync_type)
{
case TM_END_SYNC:
case TM_BEGIN_SYNC:
{
lp_tx = (CTmTxBase *) gv_tm_info.get_tx(lp_data->u.iv_node_to_takeover, lp_data->u.iv_seqnum);
if (lp_tx == NULL)
{
tm_log_event(DTM_TM_INVALID_TRANSACTION, SQ_LOG_CRIT, "DTM_TM_INVALID_TRANSACTION");
TMTrace(1, ("tm_originating_sync_commit : END/BEGIN SYNC - Unable to find "
" transaction during a phase 2 sync\n"));
abort ();
}
TMTrace(3, ("tm_originating_sync_commit : END/BEGIN SYNC "
"for ID %d, Tx.SeqNum %d, Tx.Tag %d, tag %d, type %d.\n",
lp_data->u.iv_seqnum, lp_tx->seqnum(), lp_tx->tag(), pv_tag, lp_data->iv_sync_type));
lp_tx->schedule_eventQ();
break;
}
case TM_FORGET_SYNC:
{
// we are done, queue endforget event against txn thread.
lp_tx = (CTmTxBase *) gv_tm_info.get_tx(lp_data->u.iv_node_to_takeover, lp_data->u.iv_seqnum);
// If the transaction object doesn't exist we assume that it's already been
// cleaned up. This can happen, for example, when a begin or end sync completion
// arrives after we've already issued the forget sync. TM_TX_Info::schedule_eventQ
// will drive forget processing because the transaction is in forgotten state.
// In this case the best thing we can do is simply throw away the forget sync
// completion here.
if (lp_tx == NULL)
{
tm_log_event(DTM_TM_INVALID_TRANSACTION, SQ_LOG_CRIT, "DTM_TM_INVALID_TRANSACTION");
TMTrace(1, ("tm_originating_sync_commit : FORGET_SYNC - WARNING "
"Unable to find transaction for a phase 2 "
"forget sync completion. Completion assumed out of order and ignored!\n"));
abort ();
}
else
{
TMTrace(3, ("tm_originating_sync_commit : FORGET_SYNC "
"for ID %d, Tx.SeqNum %d, Tx.Tag %d, tag %d, type %d.\n",
lp_data->u.iv_seqnum, lp_tx->seqnum(), lp_tx->tag(), pv_tag, lp_data->iv_sync_type));
lp_msg = lp_tx->currRequest();
if (lp_msg)
{
lp_msg->requestType(TM_MSG_TXINTERNAL_ENDFORGET);
lp_tx->eventQ_push(lp_msg);
}
else
{
tm_log_event(DTM_TM_INVALID_TRANSACTION, SQ_LOG_CRIT, "DTM_TM_INVALID_TRANSACTION");
TMTrace(1, ("tm_originating_sync_commit : FORGET_SYNC - Forget Sync phase 2 for transaction "
"%d but request "
"has already completed! Forget ignored.\n", lp_tx->seqnum()));
}
}
break;
}
case TM_STATE_RESYNC:
{
TMTrace(1, ("tm_originating_sync_commit, TM_STATE_RESYNC received, no-op\n"));
break;
}
case TM_RECOVERY_START:
{
#ifdef DEBUG_MODE
bool lv_verify = false;
ms_getenv_bool("TM_VERIFY", &lv_verify);
if (lv_verify)
{
if (gv_tm_info.iv_trace_level)
{
if (gv_tm_info.tm_test_verify(lp_data->u.iv_node_to_takeover))
trace_printf("tm_verify after takeover successful\n");
else
trace_printf("tm_verify after takeover ERROR\n");
}
}
#endif
break;
}
case TM_RECOVERY_END:
{
break;
}
case TM_LISTBUILT_SYNC:
{
// We don't need to do anything in the originating TM since we've already
// recorded the appropriate flags.
TMTrace(3, ("tm_originating_sync_commit : received TM_LISTBUILT_SYNC, no-op.\n"));
break;
}
case TM_UP:
{
// set registry entry to indicate that transaction service is ready.
gv_tm_info.set_txnsvc_ready(TXNSVC_UP);
break;
}
case TM_PROCESS_RESTART:
break;
case TM_SYS_RECOV_START_SYNC:
{
// software fault
if (!gv_tm_info.ClusterRecov())
{
tm_log_event(DTM_RECOVERY_FAILED2, SQ_LOG_CRIT, "DTM_RECOVERY_FAILED2",
-1,-1,gv_tm_info.nid());
abort(); // this is a software fault that doesn't warrant taking
// down the cluster
}
// System recovery now runs in the timer thread to keep the main thread from blocking.
gv_tm_info.schedule_recover_system();
break;
}
case TM_SYS_RECOV_END_SYNC:
{
// Lead TM: Send out TM_UP sync and then set TM_UP to allow new transactions to be processed.
send_state_up_sync(gv_tm_info.nid());
gv_tm_info.tm_up();
gv_tm_info.can_takeover(true);
break;
}
default:
{
// TODO
break;
}
};
TMTrace(2, ("tm_originating_sync_commit EXIT, TxnId %d, sync type %d\n",
lp_data->u.iv_seqnum, lp_data->iv_sync_type));
gv_tm_info.remove_sync_otag(pv_tag);
}
void tm_originating_sync_abort(int32 pv_tag)
{
CTmTxBase *lp_tx = NULL;
Tm_Sync_Type_Transid *lp_data = gv_tm_info.get_sync_otag(pv_tag);
// we need to allow this to not be here as the monitor can choose the abort...
if (lp_data == NULL)
{
TMTrace(1, ("tm_originating_sync_abort : NULL data for tag %d\n", pv_tag));
}
else
{
TMTrace(2, ("tm_originating_sync_abort ENTRY, tag=%d, type=%d\n", pv_tag, lp_data->iv_sync_type));
if (lp_data->iv_num_tries >=3 )
{
tm_log_event(DTM_TM_EXCEEDED_SYNC_ABORT_TRIES, SQ_LOG_CRIT, "DTM_TM_EXCEEDED_SYNC_ABORT_TRIES");
TMTrace(1, ("tm_originating_sync_abort : max number of retries, exiting.\n"));
abort (); // retry 3 times
}
lp_data->iv_num_tries++;
switch(lp_data->iv_sync_type)
{
case TM_BEGIN_SYNC:
case TM_END_SYNC:
case TM_FORGET_SYNC:
{
lp_tx = (CTmTxBase *) gv_tm_info.get_tx(lp_data->u.iv_node_to_takeover, lp_data->u.iv_seqnum);
if (lp_tx == NULL)
{
tm_log_event(DTM_TM_INVALID_TRANSACTION, SQ_LOG_CRIT, "DTM_TM_INVALID_TRANSACTION");
TMTrace(1, ("tm_originating_sync_abort - Unable to find transaction "
"during a phase 2 sync.\n"));
abort ();
}
lp_tx->schedule_redrive_sync();
break;
}
case TM_UP:
{
send_state_up_sync(gv_tm_info.nid());
break;
}
case TM_STATE_RESYNC:
{
TMTrace(3, ("tm_originating_sync_abort - TM_STATE_RESYNC sync received.\n"));
send_state_resync (gv_tm_info.nid(),
gv_tm_info.down_without_sync(lp_data->u.iv_node_to_takeover),
gv_tm_info.node_being_recovered(lp_data->u.iv_node_to_takeover),
gv_tm_info.recovery_list_built(lp_data->u.iv_node_to_takeover),
lp_data->u.iv_node_to_takeover);
break;
}
case TM_RECOVERY_START:
{
send_takeover_tm_sync (TM_RECOVERY_START, gv_tm_info.nid(),
lp_data->u.iv_node_to_takeover);
break;
}
case TM_RECOVERY_END:
{
send_takeover_tm_sync (TM_RECOVERY_END, gv_tm_info.nid(),
lp_data->u.iv_node_to_takeover);
break;
}
case TM_LISTBUILT_SYNC:
{
send_recov_listbuilt_sync (gv_tm_info.nid(), lp_data->u.iv_node_to_takeover);
break;
}
case TM_PROCESS_RESTART:
break;
case TM_SYS_RECOV_START_SYNC:
{
send_sys_recov_start_sync(gv_tm_info.nid());
break;
}
case TM_SYS_RECOV_END_SYNC:
{
send_sys_recov_end_sync(gv_tm_info.nid());
break;
}
default:
{
tm_log_event(DTM_TM_UKN_SYNC_TYPE, SQ_LOG_WARNING, "DTM_TM_UKN_SYNC_TYPE");
break;
}
};
// do not remove from table as we will retry
}
TMTrace(2, ("tm_originating_sync_abort EXIT\n"));
}
// ---------------------------------------------------------------------------
// tm_process_node_down_msg
// Purpose : Process a down msg from the monitor (virtual nodes). For real
// clusters, process a DTM process death in the case of a logical node failure
// ---------------------------------------------------------------------------
void tm_process_node_down_msg(int32 pv_nid)
{
gv_tm_info.close_tm(pv_nid);
TMTrace(2, ("tm_process_node_down_msg ENTRY, nid %d\n", pv_nid));
tm_log_event(DTM_NODEDOWN, SQ_LOG_INFO, "DTM_NODEDOWN",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
NULL,pv_nid);
if (!gv_tm_info.lead_tm())
{
tm_get_leader_info();
if (gv_tm_info.lead_tm() == false)
{
TMTrace(2, ("tm_process_node_down_msg EXIT - %d is not the lead TM.\n", pv_nid));
return;
}
}
if ((gv_tm_info.sys_recov_state() != TM_SYS_RECOV_STATE_END) &&
(gv_tm_info.sys_recov_lead_tm_nid() == pv_nid))
// If this is system startup time and system recovery has not yet ended
// and the down node is the previous lead TM node, this new Lead TM needs
// to perform system recovery again. There are no new outstanding
// transactions to take over from the down node at this stage since
// transaction has not yet been enabled.
{
gv_tm_info.ClusterRecov(new TM_Recov(gv_tm_info.rm_wait_time()));
gv_tm_info.ClusterRecov()->initiate_start_sync();
}
else
{
if (gv_tm_info.can_takeover())
{
// take over phase 1 is now called by TM_Info:restart_tm to make sure
// it happens after the tm process starts.
//tm_process_take_over_phase1 (pv_nid);
// lets start with a clean slate and write a control
// point after a takeover just in case the lead went
// down, and for an otherwise fresh start
if ((gv_tm_info.state() == TM_STATE_SHUTTING_DOWN) ||
(gv_tm_info.state() == TM_STATE_SHUTDOWN_COMPLETED))
{
if (gv_tm_info.num_active_txs() <= 0)
{
// redrive the shutdown operation
TMShutdown *lp_Shutdown = new TMShutdown(&gv_tm_info, gv_RMs.TSE()->return_rms());
gv_tm_info.shutdown_coordination_started(true);
lp_Shutdown->coordinate_shutdown();
delete lp_Shutdown;
// This must be the lead TM. After the shutdown, set the registry
// entry to indicate that transaction service has stoppped
gv_tm_info.set_txnsvc_ready(TXNSVC_DOWN);
}
}
}
}
// Since the node is down, the TM is closed
gv_tm_info.close_tm(pv_nid);
TMTrace(2, ("tm_process_node_down_msg EXIT nid %d\n", pv_nid));
} //tm_process_node_down_msg
// -----------------------------------------------------------------
// tm_process_node_quiesce_msg
// Purpose : process a quiesce node notice from the Monitor.
// This can be received by any TM. The TM suspends transaction
// processing but will still process Management requests and TSE
// replies to outstanding requests. The Monitor will kill this
// TM process once TSEs have completed control pointing.
// pv_stop is only set to true for TM testing.
// ----------------------------------------------------------------
void tm_process_node_quiesce_msg(CTmTxMessage *pp_msg=NULL)
{
short lv_error = FEOK;
static int32 lv_lastTMState = gv_tm_info.state();
bool lv_stop = (pp_msg)?pp_msg->request()->u.iv_quiesce.iv_stop:false;
TMTrace(2, ("tm_process_node_quiesce_msg ENTRY, stop=%d, current TM State %d.\n",
lv_stop, lv_lastTMState));
tm_log_event(DTM_NODEQUIESCE, SQ_LOG_INFO, "DTM_NODEQUIESCE",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,gv_tm_info.state(),lv_stop);
if (lv_stop)
{
if (gv_tm_info.state() != TM_STATE_QUIESCE)
{
TMTrace(1, ("tm_process_node_quiesce_msg - Must quiesce first!!\n"));
}
else
gv_tm_info.state(lv_lastTMState);
}
else
gv_tm_info.state(TM_STATE_QUIESCE);
tm_log_event(DTM_TM_QUIESCED, SQ_LOG_WARNING, "DTM_TM_QUIESCED",
-1, -1, gv_tm_info.nid());
TMTrace(1, ("TM %d quiescing.\n", gv_tm_info.nid()));
if (pp_msg != NULL)
{
pp_msg->reply(lv_error);
delete pp_msg;
}
TMTrace(2, ("tm_process_node_quiesce_msg EXIT.\n"));
} //tm_process_req_quiesce
void tm_abort_all_transactions(bool pv_shutdown)
{
TMTrace(2, ("tm_abort_all_transactions ENTRY with shutdown=%d.\n", pv_shutdown));
gv_tm_info.abort_all_active_txns();
if (!pv_shutdown)
gv_tm_info.state(TM_STATE_UP);
TMTrace(2, ("tm_abort_all_transactions EXIT\n"));
}
// ----------------------------------------------------------
// tm_process_registry_change
// Purpose - determine if a DTM key was changed and if we need
// to take action
// -----------------------------------------------------------
void tm_process_registry_change(MS_Mon_Change_def *pp_change )
{
int32 lv_value;
char lv_regKeyText[1024];
char *lp_regKeyText = (char *) &lv_regKeyText;
sprintf(lp_regKeyText, "%s:%s=%s", pp_change->group,
pp_change->key, pp_change->value);
TMTrace(1, ("tm_process_registry_change Registry Change notice key %s.\n", lp_regKeyText));
//tm_log_event(DTM_REGCHANGE_NOTICE, SQ_LOG_INFO, "DTM_REGCHANGE_NOTICE",
// -1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lp_regKeyText);
if (strcmp(pp_change->key, DTM_STALL_PHASE_2) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
{
gv_tm_info.stall_phase_2(lv_value);
HbaseTM_initiate_stall(lv_value);
}
}
else if (strcmp(pp_change->key, DTM_RM_WAIT_TIME) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0)
{
lv_value *= 100; // 100 (secs to 10 msecs)
gv_tm_info.rm_wait_time(lv_value);
}
}
else if (strcmp(pp_change->key, DTM_TM_TRACE) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
gv_tm_info.set_trace(lv_value/*detail*/);
}
else if (strcmp(pp_change->key, DTM_TRANS_HUNG_RETRY_INTERVAL) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0)
gv_tm_info.trans_hung_retry_interval(lv_value);
}
else if (strcmp(pp_change->key, DTM_XATM_TRACE) == 0)
{
if (strcmp(pp_change->value,"") != 0)
gv_tm_info.set_xa_trace(pp_change->value);
}
else if (strcmp(pp_change->key, DTM_TIMERTHREAD_WAIT) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0 || lv_value == -1)
gv_tm_info.timerDefaultWaitTime(lv_value);
gv_tm_info.tmTimer()->defaultWaitTime(gv_tm_info.timerDefaultWaitTime());
}
// Note that with pool configuration parameters you must set/alter them in a
// specific order if they overlap because they are parsed separately.
// Increasing values: Set max, then ss_high, then ss_low.
// Decreasing values: Set ss_low, then ss_high, then max.
else if (strcmp(pp_change->key, DTM_TM_STATS) == 0)
{
lv_value = atoi (pp_change->value);
bool lv_tm_stats = ((lv_value == 0)?false:true);
gv_tm_info.stats()->initialize(lv_tm_stats, gv_tm_info.stats()->collectInterval());
gv_tm_info.threadPool()->setConfig(lv_tm_stats);
// Add other pools here
}
// Configure thread pool
else if (strcmp(pp_change->key, DTM_MAX_NUM_THREADS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 1)
gv_tm_info.threadPool()->setConfig(gv_tm_info.tm_stats(), lv_value);
}
else if (strcmp(pp_change->key, DTM_STEADYSTATE_LOW_THREADS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
gv_tm_info.threadPool()->setConfig(gv_tm_info.tm_stats(), -1, lv_value);
}
else if (strcmp(pp_change->key, DTM_STEADYSTATE_HIGH_THREADS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
gv_tm_info.threadPool()->setConfig(gv_tm_info.tm_stats(), -1, -1, lv_value);
}
// Configure transaction pool
else if (strcmp(pp_change->key, DTM_MAX_NUM_TRANS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 1)
gv_tm_info.transactionPool()->setConfig(gv_tm_info.tm_stats(), lv_value);
}
else if (strcmp(pp_change->key, DTM_STEADYSTATE_LOW_TRANS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
gv_tm_info.transactionPool()->setConfig(gv_tm_info.tm_stats(), -1, lv_value);
}
else if (strcmp(pp_change->key, DTM_STEADYSTATE_HIGH_TRANS) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value >= 0)
gv_tm_info.transactionPool()->setConfig(gv_tm_info.tm_stats(), -1, -1, lv_value);
}
else if (strcmp(pp_change->key, DTM_CP_INTERVAL) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0)
lv_value *= 60000; // 60 (mins to secs) * 1000 (secs to msecs)
if (lv_value >= 0 && lv_value != gv_tm_info.cp_interval())
{
// Cancel the TmTimer control point event and re-add with the
// new interval.
gv_tm_info.cp_interval(lv_value);
gv_tm_info.tmTimer()->cancelControlpointEvent();
gv_tm_info.tmTimer()->addControlpointEvent(lv_value);
}
}
else if (strcmp(pp_change->key, DTM_STATS_INTERVAL) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0)
lv_value *= 60000; // 60 (mins to secs) * 1000 (secs to msecs)
if (lv_value >= 0 && lv_value != gv_tm_info.stats_interval())
{
// Cancel the TmTimer stats event and re-add with the
// new interval.
gv_tm_info.stats_interval(lv_value);
gv_tm_info.tmTimer()->cancelStatsEvent();
gv_tm_info.tmTimer()->addStatsEvent(lv_value);
}
}
else if (strcmp(pp_change->key, DTM_TM_RMRETRY_INTERVAL) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value > 0)
lv_value *= 60000; // 60 (mins to secs) * 1000 (secs to msecs)
if (lv_value >= 0 && lv_value != gv_tm_info.RMRetry_interval())
{
// Cancel the TmTimer stats event and re-add with the
// new interval.
gv_tm_info.RMRetry_interval(lv_value);
gv_tm_info.tmTimer()->cancelRMRetryEvent();
gv_tm_info.tmTimer()->addRMRetryEvent(lv_value);
}
}
else if (strcmp(pp_change->key, DTM_TX_ABORT_TIMEOUT) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value != -1 && lv_value <= 0)
gv_tm_info.timeout(TX_ABORT_TIMEOUT); //Default
else
gv_tm_info.timeout(lv_value);
}
else if (strcmp(pp_change->key, DTM_TEST_PAUSE_STATE) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value < TM_TX_STATE_NOTX || lv_value > TM_TX_STATE_LAST)
{
if (lv_value == -2)
{
TMTrace(1,("DTM_TEST_PAUSE_STATE set to %d, type %d = random!\n",
lv_value, gv_pause_state_type));
srand(time(NULL));
gv_pause_state_type = TX_PAUSE_STATE_TYPE_RANDOM;
gv_pause_state = rand() % TM_TX_STATE_LAST; //starting point
}
else
{
TMTrace(1,("DTM_TEST_PAUSE_STATE set to default (-1) because %d not a valid state, type %d.\n",
lv_value, TM_TX_STATE_NOTX));
gv_pause_state = -1; //Default
}
}
else
{
TMTrace(1,("DTM_TEST_PAUSE_STATE set to %d, type %d.\n", lv_value, gv_pause_state_type));
gv_pause_state = lv_value;
}
}
else if (strcmp(pp_change->key, DTM_RM_PARTIC) == 0)
{
lv_value = atoi (pp_change->value);
gv_tm_info.RMPartic(lv_value);
TMTrace (1, ("DTM_RM_PARTIC set to %d.\n", gv_tm_info.RMPartic()));
}
else if (strcmp(pp_change->key, DTM_TM_TS_MODE) == 0)
{
lv_value = atoi (pp_change->value);
gv_tm_info.TSMode((TS_MODE) lv_value);
TMTrace (1, ("DTM_TM_TS_MODE set to %d.\n", gv_tm_info.TSMode()));
}
else if (strcmp(pp_change->key, DTM_TM_SHUTDOWNABRUPTNOW) == 0)
{
lv_value = atoi (pp_change->value);
if (lv_value == 1)
{
TMTrace (1, ("DTM_TM_SHUTDOWNABRUPTNOW set, calling shutdown, abrupt. Use for testing only!!\n"));
tm_log_event(DTM_ERROR_SHUTDOWN_DEBUG, SQ_LOG_INFO, "DTM_ERROR_SHUTDOWN_DEBUG");
msg_mon_shutdown(MS_Mon_ShutdownLevel_Abrupt);
}
}
else if (strcmp(pp_change->key, DTM_BROADCAST_ROLLBACKS) == 0)
{
bool lv_changed = false;
TM_BROADCAST_ROLLBACKS lv_broadcast_rollbacks = (TM_BROADCAST_ROLLBACKS) atoi (pp_change->value);
switch (lv_broadcast_rollbacks)
{
case TM_BROADCAST_ROLLBACKS_NO:
case TM_BROADCAST_ROLLBACKS_YES:
case TM_BROADCAST_ROLLBACKS_DEBUG:
gv_tm_info.broadcast_rollbacks(lv_broadcast_rollbacks);
lv_changed = true;
break;
}
if (lv_changed)
{
tm_log_event (DTM_BROADCAST_ROLLBACKS_INFO, SQ_LOG_INFO,"DTM_BROADCAST_ROLLBACKS_INFO",
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,gv_tm_info.broadcast_rollbacks());
TMTrace(1, ("DTM_BROADCAST_ROLLBACKS changed to %d.\n", gv_tm_info.broadcast_rollbacks()));
}
}
}
// tm_process_monitor_msg
// Purpose - when a monitor message is received, this is called
// ------------------------------------------------------------
void tm_process_monitor_msg(BMS_SRE *pp_sre, char *pp_buf)
{
CTmTxBase *lp_tx = NULL;
MS_Mon_Msg lv_msg;
if (pp_buf == NULL)
{
tm_log_event(DTM_INVALID_PROC_MON_MSG, SQ_LOG_CRIT, "DTM_INVALID_PROC_MON_MSG");
TMTrace(1, ("tm_process_monitor_msg ENTER, data null, exiting \n"));
abort ();
}
memcpy (&lv_msg, pp_buf, sizeof (MS_Mon_Msg));
TMTrace(2, ("tm_process_monitor_msg ENTRY, type=%d\n", lv_msg.type));
if (lv_msg.type != MS_MsgType_NodeQuiesce)
// Delay reply for quiesce processing.
// At this point we will not reply with an error so get the reply
// out of the way so we can do some real processing
XMSG_REPLY_(pp_sre->sre_msgId, /*msgid*/
NULL, /*replyctrl*/
0, /*replyctrlsize*/
NULL, /*replydata*/
0, /*replydatasize*/
0, /*errorclass*/
NULL); /*newphandle*/
switch (lv_msg.type)
{
case MS_MsgType_Change:
{
tm_process_registry_change(&lv_msg.u.change);
break;
}
case MS_MsgType_Shutdown:
{
// If the TM is already shutting down, we don't want to change the state back to TM_STATE_SHUTTING_DOWN
if (gv_tm_info.state_shutdown())
{
TMTrace(1, ("tm_process_monitor_msg Shutdown notice, level %d. Duplicate notice ignored!\n",
lv_msg.u.shutdown.level));
tm_log_event(DTM_DUPLICATE_SHUTDOWN_NOTICE, SQ_LOG_CRIT, "DTM_DUPLICATE_SHUTDOWN_NOTICE",
FEDUP,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_msg.u.shutdown.level);
}
else
{
TMTrace(1, ("tm_process_monitor_msg Shutdown notice, level %d.\n",
lv_msg.u.shutdown.level));
tm_log_event(DTM_SHUTDOWN_NOTICE, SQ_LOG_INFO, "DTM_SHUTDOWN_NOTICE",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_msg.u.shutdown.level);
gv_tm_info.state(TM_STATE_SHUTTING_DOWN);
gv_tm_info.shutdown_level(lv_msg.u.shutdown.level);
if (lv_msg.u.shutdown.level == MS_Mon_ShutdownLevel_Immediate)
tm_abort_all_transactions(true);
}
// if the shutdown mode is MS_Mon_ShutdownLevel_Normal, go back to the main loop
// to service user's request for committing or aborting the outstanding txs.
// If the shutdown mode is MS_Mon_ShutdownLevel_Abrupt, go back to the main loop
// and wait for the monitor to kill all the TMs.
break;
} // MS_MsgType_Shutdown
case MS_MsgType_NodeDown:
{
TMTrace(3, ("tm_process_monitor_msg NodeDown notice for nid %d\n", lv_msg.u.down.nid));
// Appoint new Lead TM if necessary.
tm_get_leader_info();
if (gv_tm_info.lead_tm() == false)
{
gv_tm_info.down_without_sync(lv_msg.u.death.nid, true);
TMTrace(3, ("tm_process_monitor_msg - setting down_without_sync to TRUE for node %d\n",
lv_msg.u.death.nid))
}
// Process the death notice for the logical node which died
// We may already have processed a node down message, depending on the Seaquest
// environment - configurations with spares don't send node down.
if (gv_tm_info.tm_is_up(lv_msg.u.death.nid))
tm_process_node_down_msg(lv_msg.u.death.nid);
// If we're the lead TM, attempt to recover the TM.
if (gv_tm_info.lead_tm() == true)
gv_tm_info.addTMRestartRetry(lv_msg.u.death.nid, 0);
break;
}
case MS_MsgType_NodeUp:
{
TMTrace(1, ("tm_process_monitor_msg NodeUp notice for nid %d\n", lv_msg.u.up.nid));
tm_log_event(DTM_NODEUP, SQ_LOG_INFO, "DTM_NODEUP",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
NULL,lv_msg.u.up.nid);
break;
}
case MS_MsgType_NodePrepare:
{
TMTrace(1, ("tm_process_monitor_msg NodePrepare notice for nid %d\n", lv_msg.u.prepare.nid));
tm_log_event(DTM_NODEPREPARE, SQ_LOG_INFO, "DTM_NODEPREPARE",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
NULL,lv_msg.u.prepare.nid);
if (gv_tm_info.lead_tm()) {
gv_tm_info.restart_tm_process(lv_msg.u.prepare.nid);
}
break;
}
case MS_MsgType_TmRestarted:
{
TMTrace(1, ("tm_process_monitor_msg TMRestarted notice for nid %d\n", lv_msg.u.tmrestarted.nid));
// Appoint new Lead TM if necessary.
tm_get_leader_info();
if (gv_tm_info.lead_tm() == false)
{
gv_tm_info.down_without_sync(lv_msg.u.tmrestarted.nid, true);
TMTrace(3, ("tm_process_monitor_msg - setting down_without_sync to TRUE for node %d\n",
lv_msg.u.tmrestarted.nid))
}
tm_log_event(DTM_TMRESTARTED, SQ_LOG_INFO, "DTM_TMRESTARTED",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
NULL,lv_msg.u.tmrestarted.nid);
if (gv_tm_info.lead_tm()) {
gv_tm_info.open_restarted_tm(lv_msg.u.tmrestarted.nid);
}
break;
}
case MS_MsgType_ProcessDeath:
{
TMTrace(3, ("tm_process_monitor_msg Process Death notice for %s\n",
lv_msg.u.death.process_name));
switch (lv_msg.u.death.type)
{
case MS_ProcessType_TSE:
{
tm_log_event(DTM_PROCDEATH_TSE, SQ_LOG_INFO, "DTM_PROCDEATH_TSE",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_msg.u.death.pid,-1,-1,
lv_msg.u.death.process_name,lv_msg.u.death.nid);
TMTrace(1, ("tm_process_monitor_msg death notice for TSE %s (%d, %d).\n",
lv_msg.u.death.process_name, lv_msg.u.death.nid, lv_msg.u.death.pid));
// Check to see if the TSE is still alive - this will indicate a
// failover rather than a crash/stop.
int lv_nid;
int lv_pid;
int lv_ret = msg_mon_get_process_info ((char *) &lv_msg.u.death.process_name,
&lv_nid, &lv_pid);
// Mark TSE as failed in RM list
if (lv_ret != FEOK || lv_pid == -1)
{
tm_log_event(DTM_TSE_FAILURE_DETECTED, SQ_LOG_WARNING, "DTM_TSE_FAILURE_DETECTED",
lv_ret,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_msg.u.death.pid,-1,-1,
lv_msg.u.death.process_name,lv_msg.u.death.nid);
TMTrace(1, ("tm_process_monitor_msg Failure detected for TSE %s (%d, %d).\n",
lv_msg.u.death.process_name, lv_msg.u.death.nid, lv_msg.u.death.pid));
gv_RMs.TSE()->fail_rm(lv_msg.u.death.nid, lv_msg.u.death.pid);
}
else
{
// Ignore failovers, they're transparent apart from an error
// 201 for any outstanding I/Os.
RM_Info_TSEBranch * lp_RM;
for (int lv_inx=0;
lv_inx < gv_RMs.TSE()->return_highest_index_used();
lv_inx++)
{
lp_RM = gv_RMs.TSE()->return_slot_by_index(lv_inx);
if (lp_RM &&
!strcmp(lp_RM->pname(),
(char *) &lv_msg.u.death.process_name))
{
lp_RM->nid(lv_nid);
lp_RM->pid(lv_pid);
}
}
tm_log_event(DTM_TSE_FAILOVER_DETECTED, SQ_LOG_INFO, "DTM_TSE_FAILOVER_DETECTED",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_pid,-1,-1,
lv_msg.u.death.process_name,lv_nid);
TMTrace(1, ("tm_process_monitor_msg failover detected for TSE %s. New primary is (%d, %d).\n",
lv_msg.u.death.process_name, lv_nid, lv_pid));
}
break;
}
case MS_ProcessType_ASE:
{
// Don't care unless it's the TLOG. TODO
tm_log_event(DTM_PROCDEATH_ASE, SQ_LOG_INFO, "DTM_PROCDEATH_ASE",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,lv_msg.u.death.pid,-1,-1,
lv_msg.u.death.process_name,lv_msg.u.death.nid);
TMTrace(1, ("tm_process_monitor_msg death notice for ASE %s (%d, %d).\n",
lv_msg.u.death.process_name, lv_msg.u.death.nid, lv_msg.u.death.pid));
break;
}
case MS_ProcessType_DTM:
{
tm_log_event(DTM_PROCDEATH_DTM, SQ_LOG_INFO, "DTM_PROCDEATH_DTM",
-1,-1,gv_tm_info.nid(),-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
lv_msg.u.death.process_name);
TMTrace(1, ("tm_process_monitor_msg death notice for DTM%d\n", lv_msg.u.death.nid));
break;
}
// most likely application death. If not, then the tx will come back NULL
// and we'll just return
default :
{
TMTrace(1, ("tm_process_monitor_msg death notice for process type %d\n",lv_msg.u.death.type ));
TM_Txid_Internal *lp_transid = (TM_Txid_Internal *)&lv_msg.u.death.transid;
lp_tx = (CTmTxBase *) gv_tm_info.get_tx(lp_transid);
if (lp_tx != NULL)
lp_tx->schedule_abort();
// this is a regular process death not associated with a transid. Find them....
else
{
TMTrace(3, ("tm_process_monitor_msg death notice for pid %d on nid %d\n",
lv_msg.u.death.pid, lv_msg.u.death.nid ));
int64 lv_count = 0;
int64 lv_size = 0;
void **lp_tx_list = gv_tm_info.get_all_txs (&lv_size);
if (!lp_tx_list)
break;
TM_TX_Info *lp_current_tx = (TM_TX_Info *)lp_tx_list[lv_count];
while ((lv_count < lv_size) && lp_current_tx)
{
if (lp_current_tx->is_app_partic(lv_msg.u.death.pid, lv_msg.u.death.nid))
{
TMTrace(3, ("tm_process_monitor_msg aborting seq num %d\n",
lp_current_tx->seqnum() ));
lp_current_tx->remove_app_partic(lv_msg.u.death.pid, lv_msg.u.death.nid);
lp_current_tx->schedule_abort();
}
lv_count++;
if (lv_count < lv_size)
lp_current_tx = (TM_TX_Info*)lp_tx_list[lv_count];
}
if (lp_tx_list)
delete []lp_tx_list;
}
break;
}
}
break;
}
case MS_MsgType_NodeQuiesce:
{
TMTrace(3, ("tm_process_monitor_msg NodeQuiesce notice.\n"));
tm_process_node_quiesce_msg();
XMSG_REPLY_(pp_sre->sre_msgId, /*msgid*/
NULL, /*replyctrl*/
0, /*replyctrlsize*/
NULL, /*replydata*/
0, /*replydatasize*/
0, /*errorclass*/
NULL); /*newphandle*/
break;
}
case MS_MsgType_TmSyncAbort:
{
// There can be many monitor replies, so circle through them all
for (int lv_count = 0; lv_count < lv_msg.u.tmsync.count; lv_count++)
{
// We use sync handles for receiving DTMs and sync tags for originating DTMs. Right now this
// is required because the tm_sync_cb() is passed the handle.
Tm_Sync_Data *lp_sync_data = (Tm_Sync_Data *)gv_sync_map.get(lv_msg.u.tmsync.handle[lv_count]);
// originating DTM
if (lp_sync_data == NULL)
{
tm_originating_sync_abort (lv_msg.u.tmsync.orig_tag[lv_count]);
}
// recipient DTM
else
{
gv_sync_map.remove(lv_msg.u.tmsync.handle[lv_count]);
delete lp_sync_data;
}
}
break; // case MS_MsgType_TmSyncAbort
}
case MS_MsgType_TmSyncCommit:
{
// There can be many monitor replies, so circle through them all
for (int lv_count = 0; lv_count < lv_msg.u.tmsync.count; lv_count++)
{
// We use sync handles for receiving DTMs and sync tags for originating DTMs. Right now this
// is required because the tm_sync_cb() is passed the handle.
Tm_Sync_Data *lp_sync_data = (Tm_Sync_Data *)gv_sync_map.get(lv_msg.u.tmsync.handle[lv_count]);
// originating DTM
if (lp_sync_data == NULL)
{
tm_originating_sync_commit(lv_msg.u.tmsync.orig_tag[lv_count]);
}
// recipient DTM
else
{
tm_recipient_sync_commit(lp_sync_data);
gv_sync_map.remove(lv_msg.u.tmsync.handle[lv_count]);
delete lp_sync_data;
}
}
break;
}
case MS_MsgType_Event:
case MS_MsgType_UnsolicitedMessage:
default:
{
break;
}
};
TMTrace(2, ("tm_process_monitor_msg EXIT\n"));
}
// -----------------------------------------------------------------------
// tm_process_msg
// Purpose - process messages incoming to the TM
// -----------------------------------------------------------------------
void tm_process_msg(BMS_SRE *pp_sre)
{
short lv_ret;
char la_send_buffer[4096];
char la_recv_buffer[sizeof(Tm_Req_Msg_Type)];
char *la_recv_buffer_ddl = NULL;
Tm_Broadcast_Req_Type *lp_br_req;
Tm_Broadcast_Rsp_Type *lp_br_rsp;
Tm_Perf_Stats_Req_Type *lp_ps_req;
Tm_Perf_Stats_Rsp_Type *lp_ps_rsp;
//Tm_Sys_Status_Req_Type *lp_ss_req;
Tm_Sys_Status_Rsp_Type *lp_ss_rsp;
Tm_RolloverCP_Req_Type *lp_rc_req;
Tm_RolloverCP_Rsp_Type *lp_rc_rsp;
Tm_Control_Point_Req_Type *lp_cp_req;
MESSAGE_HEADER_SQ *lp_msg_hdr;
CTmTxMessage *lp_msg;
TMTrace(2, ("tm_process_msg ENTRY\n"));
if((unsigned)(pp_sre->sre_reqDataSize) > (sizeof(Tm_Req_Msg_Type))){
la_recv_buffer_ddl = new char[pp_sre->sre_reqDataSize];
lv_ret = BMSG_READDATA_(pp_sre->sre_msgId, // msgid
la_recv_buffer_ddl, // reqdata
pp_sre->sre_reqDataSize); // bytecount
}else{
lv_ret = BMSG_READDATA_(pp_sre->sre_msgId, // msgid
la_recv_buffer, // reqdata
pp_sre->sre_reqDataSize); // bytecount
}
if (lv_ret != 0)
{
// a return value of 1 means the message has been abandoned by the sender.
if (lv_ret == 1)
{
tm_log_event(DTM_TM_READ_MSG_FAIL, SQ_LOG_WARNING, "DTM_TM_READ_MSG_FAIL", lv_ret);
TMTrace(1, ("tm_process_msg : BMSG_READDATA_ failed with error %d. Message ignored!\n", lv_ret));
return;
}
else
{
tm_log_event(DTM_TM_READ_MSG_FAIL, SQ_LOG_CRIT, "DTM_TM_READ_MSG_FAIL", lv_ret);
TMTrace(1, ("tm_process_msg : BMSG_READDATA_ failed with error %d\n", lv_ret));
abort();
}
}
if (pp_sre->sre_flags & XSRE_MON)
{
tm_process_monitor_msg(pp_sre, la_recv_buffer);
return;
}
lp_msg_hdr = (MESSAGE_HEADER_SQ *)&la_recv_buffer;
TMTrace(3, ("tm_process_msg : tm %d, type %d, msgid %d\n",
gv_tm_info.nid(), lp_msg_hdr->rr_type.request_type, pp_sre->sre_msgId));
// Test the message version and make sure not too low OR too high
if ((lp_msg_hdr->version.request_version < TM_SQ_MSG_VERSION_MINIMUM) ||
(lp_msg_hdr->version.request_version > TM_SQ_MSG_VERSION_CURRENT))
{
tm_log_event(DTM_TM_MSG_VERSION_INVALID, SQ_LOG_CRIT, "DTM_TM_MSG_VERSION_INVALID");
TMTrace(1, ("tm_process_msg : Old message received. Minimum supported=%d, "
"Received message version=%d\n",
TM_SQ_MSG_VERSION_MINIMUM,
lp_msg_hdr->version.request_version));
// Reply with error since illegal version
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
NULL, // replydata
0, // replydatasize
FEINCOMPATIBLEVERSION, // errorclass
NULL); // newphandle
return;
}
switch (lp_msg_hdr->rr_type.request_type)
{
case TM_MSG_TYPE_BROADCAST:
{
lp_br_req = (Tm_Broadcast_Req_Type *) la_recv_buffer;
lp_br_rsp = (Tm_Broadcast_Rsp_Type *) la_send_buffer;
tm_initialize_rsp_hdr(lp_br_req->iv_msg_hdr.rr_type.request_type,
(Tm_Rsp_Msg_Type *) lp_br_rsp);
tm_process_req_broadcast (pp_sre, lp_br_req, lp_br_rsp);
TMTrace(2, ("tm_process_msg EXIT\n"));
return;
}
case TM_MSG_TYPE_TMPERFSTATS:
{
lp_ps_req = (Tm_Perf_Stats_Req_Type *) la_recv_buffer;
lp_ps_rsp = (Tm_Perf_Stats_Rsp_Type *) la_send_buffer;
if (gv_tm_info.lead_tm())
{
// We ignore unexpected Perf Stats requests because they can happen shortly after
// a lead TM migration.
TMTrace(1, ("tm_process_msg : Warning ignoring Performance Statistics request received by Lead TM from nid %d\n",
lp_ps_req->iv_sending_tm_nid));
tm_log_event(DTM_TM_UNEXPECTED_PS_RECEIVED, SQ_LOG_WARNING, "DTM_TM_UNEXPECTED_PS_RECEIVED",
-1, -1, gv_tm_info.nid(), -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, "Lead TM received Performance Statistics request", lp_ps_req->iv_sending_tm_nid);
}
tm_fill_perf_stats_buffer(lp_ps_rsp);
ushort lv_len = sizeof(Tm_Perf_Stats_Rsp_Type);
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) lp_ps_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
TMTrace(2, ("tm_process_msg EXIT\n"));
return;
}
case TM_MSG_TYPE_CALLSTATUSSYSTEM:
{
//lp_ss_req = (Tm_Sys_Status_Req_Type *) la_recv_buffer;
lp_ss_rsp = (Tm_Sys_Status_Rsp_Type *) la_send_buffer;
TM_STATUSSYS *lp_system_status = new TM_STATUSSYS();
gv_tm_info.send_system_status(&lp_ss_rsp->iv_status_system);
ushort lv_len = sizeof(Tm_Sys_Status_Rsp_Type);
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) lp_ss_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
delete lp_system_status;
TMTrace(2, ("tm_process_msg EXIT\n"));
return;
}
case TM_MSG_TYPE_STATUSSYSTEM:
{
//lp_ss_req = (Tm_Sys_Status_Req_Type *) la_recv_buffer;
lp_ss_rsp = (Tm_Sys_Status_Rsp_Type *) la_send_buffer;
tm_fill_sys_status_buffer(lp_ss_rsp);
ushort lv_len = sizeof(Tm_Perf_Stats_Rsp_Type);
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) lp_ss_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
TMTrace(2, ("tm_process_msg EXIT\n"));
return;
}
case TM_MSG_TYPE_ROLLOVER_CP:
{
lp_rc_req = (Tm_RolloverCP_Req_Type *) la_recv_buffer;
lp_rc_rsp = (Tm_RolloverCP_Rsp_Type *) la_send_buffer;
int64 lv_sequence_no = lp_rc_req->iv_sequence_no;
TMTrace(2, ("tm_control_point_rollover nid: %d, position: %ld\n", lp_rc_req->iv_nid, lv_sequence_no));
// May write more than one control point if lv_sequence_no == 1 and iv_audit_seqno != 1
if((lv_sequence_no > gv_tm_info.audit_seqno()) || ((lv_sequence_no == 1) && (gv_tm_info.audit_seqno() !=1))) {
gv_tm_info.audit_seqno(lv_sequence_no);
gv_tm_info.addControlPointEvent();
}
ushort lv_len = sizeof(Tm_RolloverCP_Rsp_Type);
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
(char *) lp_rc_rsp, // replydata
lv_len, // replydatasize
0, // errorclass
NULL); // newphandle
TMTrace(2, ("tm_process_msg EXIT\n"));
return;
}
case TM_MSG_TYPE_CP:
{
lp_cp_req = (Tm_Control_Point_Req_Type *) la_recv_buffer;
TMTrace(3, ("tm_process_msg : Control Point from Lead TM nid %d, type %d, startup %d.\n",
lp_cp_req->iv_sending_tm_nid, lp_cp_req->iv_type, lp_cp_req->iv_startup));
if (gv_tm_info.lead_tm())
{
// We ignore these unexpected control points because they can happen shortly after
// a lead TM migration.
TMTrace(1, ("tm_process_msg : Control Point request received by Lead TM from nid %d\n",
lp_cp_req->iv_sending_tm_nid));
tm_log_event(DTM_TM_UNEXPECTED_CP_RECEIVED, SQ_LOG_WARNING, "DTM_TM_UNEXPECTED_CP_RECEIVED",
-1, -1, gv_tm_info.nid(), -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, "Lead TM received Control Point request", lp_cp_req->iv_sending_tm_nid);
}
else
{
if (lp_cp_req->iv_startup)
{
TMTrace(3, ("tm_process_msg : Control Point startup from Lead TM nid %d.\n",
lp_cp_req->iv_sending_tm_nid));
gv_tm_info.schedule_init_and_recover_rms();
}
else
{
gv_system_tx_count = 0;
gv_tm_info.write_all_trans_state();
}
}
Tm_Control_Point_Rsp_Type *lp_rsp2 =
(Tm_Control_Point_Rsp_Type *) la_send_buffer;
lp_rsp2->iv_error = 0;
lp_rsp2->iv_msg_hdr.rr_type.reply_type =
(short) (lp_msg_hdr->rr_type.request_type + 1);
lp_rsp2->iv_msg_hdr.miv_err.error = 0;
tm_send_reply(pp_sre->sre_msgId, (Tm_Rsp_Msg_Type *)lp_rsp2);
return;
}
case TM_MSG_TYPE_SHUTDOWN_COMPLETE:
{
TMTrace(3, ("tm_process_msg SHUTDOWN_COMPLETE message received.\n"));
// This is the shutdown_complete inquiry from the lead TM. Reply if all
// active txs have been aborted or commited and all RMs are closed.
// If the lead TM fails during the Seaquest Shutdown coordination,
// a new lead TM can take over and resend this inquiry message.
// That's ok. Just reply to the message again.
Tm_Shutdown_Rsp_Type *lp_rsp_shutdown =
(Tm_Shutdown_Rsp_Type *) la_send_buffer;
bool lv_shutdown = false;
lp_rsp_shutdown->iv_msg_hdr.rr_type.reply_type =
(short) (lp_msg_hdr->rr_type.request_type + 1);
switch (gv_tm_info.state())
{
case TM_STATE_WAITING_RM_OPEN:
case TM_STATE_UP:
case TM_STATE_SHUTTING_DOWN:
case TM_STATE_TX_DISABLED:
case TM_STATE_TX_DISABLED_SHUTDOWN_PHASE1:
case TM_STATE_QUIESCE:
case TM_STATE_DRAIN:
{
lp_rsp_shutdown->iv_error = FETMSHUTDOWN_NOTREADY;
break;
}
case TM_STATE_SHUTDOWN_COMPLETED:
case TM_STATE_DOWN:
{
TMTrace(3, ("tm_process_msg shutdown complete.\n"));
lp_rsp_shutdown->iv_error = FEOK;
lv_shutdown = true;
break;
}
default:
{
TMTrace(3, ("tm_process_msg shutdown dirty.\n"));
lp_rsp_shutdown->iv_error = FETMSHUTDOWN_FATAL_ERR;
lv_shutdown = true;
break;
}
}
tm_send_reply(pp_sre->sre_msgId, (Tm_Rsp_Msg_Type *) lp_rsp_shutdown);
if (lv_shutdown)
{
TMTrace(1, ("SHUTDOWN : Non Lead DTM%d shutting down, TM state %d.\n", gv_tm_info.nid(), gv_tm_info.state()));
msg_mon_process_shutdown();
TMTrace(1, ("$TM%d exiting. TM state %d.\n",
gv_tm_info.nid(), gv_tm_info.state()));
exit(0);
}
return;
}
default:
break;
}// switch
// Allocate a message object. It will be deleted by the
// TM_TX_Info::process_eventQ method once the request
// has been processed.
if( la_recv_buffer_ddl!=NULL)
lp_msg = new CTmTxMessage((Tm_Req_Msg_Type *) la_recv_buffer_ddl, pp_sre->sre_msgId, la_recv_buffer_ddl);
else
lp_msg = new CTmTxMessage((Tm_Req_Msg_Type *) &la_recv_buffer, pp_sre->sre_msgId, NULL);
if (lp_msg_hdr->dialect_type == DIALECT_TM_DP2_SQ)
{
tm_process_msg_from_xarm(lp_msg);
TMTrace(2, ("tm_process_msg EXIT. XARM Request detected.\n"));
return;
}
switch (lp_msg->requestType())
{
case TM_MSG_TYPE_BEGINTRANSACTION:
tm_process_req_begin(lp_msg);
break;
case TM_MSG_TYPE_ENDTRANSACTION:
tm_process_req_end(lp_msg);
break;
case TM_MSG_TYPE_ABORTTRANSACTION:
tm_process_req_abort(lp_msg);
break;
case TM_MSG_TYPE_STATUSTRANSACTION:
tm_process_req_status (lp_msg);
break;
case TM_MSG_TYPE_LISTTRANSACTION:
tm_process_req_list (lp_msg);
break;
case TM_MSG_TYPE_TMSTATS:
tm_process_req_tmstats (lp_msg);
break;
case TM_MSG_TYPE_STATUSTM:
tm_process_req_statustm (lp_msg);
break;
case TM_MSG_TYPE_ATTACHRM:
tm_process_req_attachrm (lp_msg);
break;
case TM_MSG_TYPE_STATUSTRANSMGMT:
tm_process_req_status_transmgmt(lp_msg);
break;
case TM_MSG_TYPE_STATUSALLTRANSMGT:
tm_process_req_status_all_transmgmt(lp_msg);
break;
case TM_MSG_TYPE_GETTRANSINFO:
tm_process_req_status_gettransinfo(lp_msg);
break;
case TM_MSG_TYPE_LEADTM:
tm_process_req_leadtm (lp_msg);
break;
case TM_MSG_TYPE_ENABLETRANS:
tm_process_req_enabletrans (lp_msg);
break;
case TM_MSG_TYPE_DISABLETRANS:
tm_process_req_disabletrans (lp_msg);
break;
case TM_MSG_TYPE_DRAINTRANS:
tm_process_req_draintrans (lp_msg);
break;
case TM_MSG_TYPE_QUIESCE:
tm_process_node_quiesce_msg(lp_msg);
break;
case (TM_MSG_TYPE_ENABLETRANS + TM_TM_MSG_OFFSET):
// Non-lead TM enableTrans arriving from lead TM
gv_tm_info.enableTrans(lp_msg);
break;
case (TM_MSG_TYPE_DISABLETRANS + TM_TM_MSG_OFFSET):
// Non-lead TM disableTrans arriving from lead TM
gv_tm_info.disableTrans(lp_msg);
break;
case (TM_MSG_TXINTERNAL_SHUTDOWNP1_WAIT + TM_TM_MSG_OFFSET):
// Non-lead TM ShutdownPhase1Wait arriving from lead TM
gv_tm_info.disableTrans(lp_msg);
break;
case TM_MSG_TYPE_AX_REG:
tm_process_req_ax_reg (lp_msg);
break;
case TM_MSG_TYPE_JOINTRANSACTION:
tm_process_req_join_trans (lp_msg);
break;
case TM_MSG_TYPE_SUSPENDTRANSACTION:
tm_process_req_suspend_trans (lp_msg);
break;
case TM_MSG_TYPE_AX_UNREG:
tm_process_req_ax_unreg (lp_msg);
break;
case TM_MSG_TYPE_TEST_TX_COUNT:
lp_msg->response()->u.iv_count.iv_count = gv_tm_info.num_active_txs();
lp_msg->reply();
delete lp_msg;
break;
case TM_MSG_TYPE_DOOMTX:
tm_process_req_doomtx(lp_msg);
break;
case TM_MSG_TYPE_TSE_DOOMTX:
tm_process_req_TSE_doomtx(lp_msg);
break;
case TM_MSG_TYPE_WAIT_TMUP:
tm_process_req_wait_tmup(lp_msg);
break;
case TM_MSG_TYPE_REGISTERREGION:
tm_process_req_registerregion(lp_msg);
break;
case TM_MSG_TYPE_DDLREQUEST:
tm_process_req_ddlrequest(lp_msg);
break;
case TM_MSG_TYPE_REQUESTREGIONINFO:
tm_process_req_requestregioninfo(lp_msg);
break;
case TM_MSG_TYPE_GETNEXTSEQNUMBLOCK:
tm_process_req_GetNextSeqNum(lp_msg);
break;
default:
// EMS message here, DTM_INVALID_MESSAGE_TYPE
tm_log_event(DTM_INVALID_MESSAGE_TYPE2, SQ_LOG_CRIT , "DTM_INVALID_MESSAGE_TYPE2",
-1, /*error_code*/
-1, /*rmid*/
gv_tm_info.nid(), /*dtmid*/
-1, /*seq_num*/
-1, /*msgid*/
-1, /*xa_error*/
-1, /*pool_size*/
-1, /*pool_elems*/
-1, /*msg_retries*/
-1, /*pool_high*/
-1, /*pool_low*/
-1, /*pool_max*/
-1, /*tx_state*/
lp_msg->requestType()); /*data */
TMTrace(1, ("tm_process_msg - TM%d received UNKNOWN message type : %d\n",
gv_tm_info.nid(), lp_msg->requestType()));
// Reply with error since unknown request type
XMSG_REPLY_(pp_sre->sre_msgId, // msgid
NULL, // replyctrl
0, // replyctrlsize
NULL, // replydata
0, // replydatasize
FEINVALOP, // errorclass
NULL); // newphandle
return;
}
TMTrace(2, ("tm_process_msg EXIT\n"));
}
// ---------------------------------------------------------------
// tm_shutdown_helper
// Purpose - coordinate shutdown once all active transactions have completed, otherwise wait briefly
// ----------------------------------------------------------------
void tm_shutdown_helper ()
{
TMTrace(2, ("tm_shutdown_helper ENTRY\n"));
if (gv_tm_info.num_active_txs() <= 0)
{
TMShutdown *lp_Shutdown = new TMShutdown(&gv_tm_info, gv_RMs.TSE()->return_rms());
gv_tm_info.shutdown_coordination_started(true);
lp_Shutdown->coordinate_shutdown();
delete lp_Shutdown;
if (gv_tm_info.lead_tm())
gv_tm_info.set_txnsvc_ready(TXNSVC_DOWN);
}
else
{
// wait 1/4 of a second, can be fine-tuned later
XWAIT(0, TM_SHUTDOWN_WAKEUP_INTERVAL);
}
TMTrace(2, ("tm_shutdown_helper EXIT\n"));
}
// ---------------------------------------------------------------
// tm_main_initialize
// Purpose - call all initialization routines
// --------------------------------------------------------------
void tm_main_initialize()
{
char la_leader_name[BUFSIZ];
char la_event_data[MS_MON_MAX_SYNC_DATA];
int32 lv_event_len;
int32 lv_leader_nid;
int32 lv_leader_pid;
// initialize and get TM leader information
gv_tm_info.initialize();
tm_xarm_initialize();
tm_log_event(DTM_TM_PROCESS_STARTUP, SQ_LOG_INFO, "DTM_TM_PROCESS_STARTUP",
-1,-1,gv_tm_info.nid());
/*lv_leader_error =*/ msg_mon_tm_leader_set(&lv_leader_nid,
&lv_leader_pid, la_leader_name);
gv_tm_info.lead_tm_nid(lv_leader_nid);
if (lv_leader_nid < 0 || lv_leader_nid >= MAX_NODES)
{
tm_log_event(DTM_TM_LEADTM_BAD, SQ_LOG_CRIT, "DTM_TM_LEADTM_BAD",
-1, -1, gv_tm_info.nid(), -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, NULL, lv_leader_nid);
abort();
}
if (lv_leader_nid == gv_tm_info.nid())
{
tm_log_event (DTM_TM_LEADTM_SET, SQ_LOG_INFO , "DTM_TM_LEADTM_SET",
-1, /*error_code*/
-1, /*rmid*/
gv_tm_info.nid(), /*dtmid*/
-1, /*seq_num*/
-1, /*msgid*/
-1, /*xa_error*/
-1, /*pool_size*/
-1, /*pool_elems*/
-1, /*msg_retries*/
-1, /*pool_high*/
-1, /*pool_low*/
-1, /*pool_max*/
-1, /*tx_state*/
-1, /*data */
-1, /*data1*/
-1,/*data2 */
NULL, /*string2*/
gv_tm_info.nid() /*node*/);
gv_tm_info.lead_tm(true);
//This must be system startup time. Wait for the events
// before performing system recovery.
//The AM and TSE events will be implemented in the future.
//msg_mon_event_wait (AM_TLOG_FIXUP_COMPLETED_EVENT_ID, &lv_event_len, la_event_data);
//msg_mon_event_wait (TSE_START_EVENT_ID, &lv_event_len, la_event_data);
msg_mon_event_wait (DTM_START_EVENT_ID, &lv_event_len, la_event_data);
}
//Start timer thread
TMTrace(1, ("tm_main_initialize, Starting timer Thread.\n"));
tm_start_timerThread();
//Start example thread
//tm_start_exampleThread();
tm_start_auditThread();
// Initialize the XA TM Library
xaTM_initialize(gv_tm_info.iv_trace_level, gv_tm_info.tm_stats(), gv_tm_info.tmTimer());
// Initialize the HBase TM Library
HbaseTM_initialize(gv_tm_info.iv_trace_level, gv_tm_info.tm_stats(), gv_tm_info.tmTimer(), gv_tm_info.nid());
if (gv_tm_info.lead_tm())
{
TMTrace(1, ("tm_main_initialize, I am Lead TM, $TM%d.\n",
gv_tm_info.nid()));
}
// open all tms before recovery, will return if not the lead
gv_tm_info.open_other_tms();
TMTrace(1, ("main : Lead DTM is on node %d\n", lv_leader_nid));
if (gv_tm_info.lead_tm())
{
// init_and_recover_rms() will invoke system recovery if this
// the Lead TM.
gv_tm_info.schedule_init_and_recover_rms();
gv_wait_interval = LEAD_DTM_WAKEUP_INTERVAL/10;
}
TMTrace(1, ("main : initialize complete, pid : %d, nid %d, cp interval %d\n",
gv_tm_info.pid(), gv_tm_info.nid(), (gv_tm_info.cp_interval()/60000)));
}
// ----------------------------------------------------------------
// main method
// ----------------------------------------------------------------
int main(int argc, char *argv[])
{
int16 lv_ret;
int32 lv_my_nid;
int32 lv_my_pid;
BMS_SRE lv_sre;
CALL_COMP_DOVERS(tm, argc, argv);
const int l_size = 20;
int l_idx = 0;
typedef struct l_element_
{
int16 lv_ret;
BMS_SRE lv_sre;
} l_element;
l_element l_array[l_size];
// get our pid info and initialize
msg_init(&argc, &argv);
// get our pid info and initialize
msg_mon_get_my_info2(&lv_my_nid, // mon node-id
&lv_my_pid, // mon process-id
NULL, // mon name
0, // mon name-len
NULL, // mon process-type
NULL, // mon zone-id
NULL, // os process-id
NULL, // os thread-id
NULL); // component-id
gv_tm_info.nid (lv_my_nid);
gv_tm_info.pid (lv_my_pid);
#ifdef MULTITHREADED_TM
XWAIT(0, -2);
#endif
msg_mon_process_startup(true); // server?
msg_debug_hook ("tm.hook", "tm.hook");
tm_init_logging();
msg_mon_tmsync_register(tm_sync_cb);
msg_mon_enable_mon_messages (1);
msg_enable_priority_queue();
// allow the DTM to use all the message descriptors
XCONTROLMESSAGESYSTEM(XCTLMSGSYS_SETRECVLIMIT,XMAX_SETTABLE_RECVLIMIT);
XCONTROLMESSAGESYSTEM(XCTLMSGSYS_SETSENDLIMIT,SEABED_MAX_SETTABLE_SENDLIMIT_TM);
tm_main_initialize();
for(;;)
{
int lv_msg_count = 0;
if ((gv_tm_info.state_shutdown()) &&
(!gv_tm_info.shutdown_coordination_started()))
{
tm_shutdown_helper();
}
XWAIT(LREQ, (int)gv_wait_interval); // 10 ms units
do
{
lv_ret = BMSG_LISTEN_((short *) &lv_sre, // sre
BLISTEN_ALLOW_IREQM,
0); // listenertag
l_array[l_idx].lv_ret = lv_ret;
memcpy((void *) &l_array[l_idx].lv_sre, (void *) &lv_sre, sizeof(lv_sre));
if (l_idx >= l_size-1)
l_idx = 0;
else
l_idx++;
if (lv_ret != BSRETYPE_NOWORK)
tm_process_msg(&lv_sre);
// come up for air to allow control point processing if need be
if (lv_msg_count++ > 100)
break;
} while (lv_ret != BSRETYPE_NOWORK);
}
}
| 1 | 18,344 | Odd that we'd set just element 299 of a 2000-byte buffer to null. But I see that this is the way it was before. | apache-trafodion | cpp |
@@ -28,3 +28,9 @@ type PeerAdder interface {
type ClosestPeerer interface {
ClosestPeer(addr swarm.Address) (peerAddr swarm.Address, err error)
}
+
+// ScoreFunc is implemented by components that need to score peers in a different way than XOR distance.
+type ScoreFunc func(peer swarm.Address) (score float32)
+
+// EachPeerFunc is a callback that is called with a peer and its PO
+type EachPeerFunc func(swarm.Address, uint8) (stop, jumpToNext bool, err error) | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package topology
import (
"context"
"errors"
"io"
"github.com/ethersphere/bee/pkg/swarm"
)
var ErrNotFound = errors.New("no peer found")
var ErrWantSelf = errors.New("node wants self")
type Driver interface {
PeerAdder
ClosestPeerer
io.Closer
}
type PeerAdder interface {
AddPeer(ctx context.Context, addr swarm.Address) error
}
type ClosestPeerer interface {
ClosestPeer(addr swarm.Address) (peerAddr swarm.Address, err error)
}
| 1 | 9,861 | ScoreFunc is not used in this PR. Maybe add it later when it's needed? | ethersphere-bee | go |
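The review note above refers to the two callback types introduced in that patch (ScoreFunc and EachPeerFunc). As a rough illustration of how they could eventually be wired together, here is a minimal, self-contained Go sketch; the peerIterator helper, the latency table and the selection loop are hypothetical stand-ins and are not part of the bee codebase, and swarm.Address is reduced to a plain string so the example compiles on its own.

// Hypothetical sketch (not part of the bee codebase): shows a ScoreFunc being
// used inside an EachPeerFunc-style iteration to pick a peer by a custom score
// instead of XOR distance.
package main

import "fmt"

// Stand-in for swarm.Address so the example stays self-contained.
type Address = string

// Mirrors the two callback types added in the patch above.
type ScoreFunc func(peer Address) (score float32)
type EachPeerFunc func(Address, uint8) (stop, jumpToNext bool, err error)

// peerIterator is an illustrative helper that walks a static peer set and
// calls f with each peer address and its proximity order (PO).
func peerIterator(peers map[Address]uint8, f EachPeerFunc) error {
	for addr, po := range peers {
		stop, _, err := f(addr, po)
		if err != nil {
			return err
		}
		if stop {
			break
		}
	}
	return nil
}

func main() {
	// A ScoreFunc that ranks peers by (negated) simulated latency rather than XOR distance.
	latency := map[Address]float32{"peer-a": 12.5, "peer-b": 3.2}
	score := ScoreFunc(func(peer Address) float32 { return -latency[peer] })

	best, bestScore := Address(""), float32(-1e9)
	_ = peerIterator(map[Address]uint8{"peer-a": 4, "peer-b": 7}, func(a Address, _ uint8) (bool, bool, error) {
		if s := score(a); s > bestScore {
			best, bestScore = a, s
		}
		return false, false, nil
	})
	fmt.Println("selected peer:", best)
}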
@@ -171,9 +171,9 @@ func customResponseForwarder(ctx context.Context, w http.ResponseWriter, resp pr
http.SetCookie(w, cookie)
}
- if redirects := md.HeaderMD.Get("Location"); len(redirects) > 0 {
- w.Header().Set("Location", redirects[0])
-
+ // Redirect if it's the browser (non-XHR).
+ redirects := md.HeaderMD.Get("Location")
+ if len(redirects) > 0 && isBrowser(copyRequestHeadersFromResponseWriter(w)) {
code := http.StatusFound
if st := md.HeaderMD.Get("Location-Status"); len(st) > 0 {
headerCodeOverride, err := strconv.Atoi(st[0]) | 1 | package mux
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/http/pprof"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
gatewayv1 "github.com/lyft/clutch/backend/api/config/gateway/v1"
"github.com/lyft/clutch/backend/service"
awsservice "github.com/lyft/clutch/backend/service/aws"
)
const (
xHeader = "X-"
xForwardedFor = "X-Forwarded-For"
xForwardedHost = "X-Forwarded-Host"
)
var apiPattern = regexp.MustCompile(`^/v\d+/`)
type assetHandler struct {
assetCfg *gatewayv1.Assets
next http.Handler
fileSystem http.FileSystem
fileServer http.Handler
}
func copyHTTPResponse(resp *http.Response, w http.ResponseWriter) {
for key, values := range resp.Header {
for _, val := range values {
w.Header().Add(key, val)
}
}
w.WriteHeader(resp.StatusCode)
_, _ = io.Copy(w, resp.Body)
}
func (a *assetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if apiPattern.MatchString(r.URL.Path) || r.URL.Path == "/healthcheck" {
// Serve from the embedded API handler.
a.next.ServeHTTP(w, r)
return
}
// Check if assets are okay to serve by calling the Fetch endpoint and verifying it returns a 200.
rec := httptest.NewRecorder()
origPath := r.URL.Path
r.URL.Path = "/v1/assets/fetch"
a.next.ServeHTTP(rec, r)
if rec.Code != http.StatusOK {
copyHTTPResponse(rec.Result(), w)
return
}
// Set the original path.
r.URL.Path = origPath
// Serve!
if f, err := a.fileSystem.Open(r.URL.Path); err != nil {
// If not a known static asset and an asset provider is configured, try streaming from the configured provider.
if a.assetCfg != nil && a.assetCfg.Provider != nil && strings.HasPrefix(r.URL.Path, "/static/") {
// We attach this header simply for observability purposes.
// Otherwise it's difficult to know if the assets are being served from the configured provider.
w.Header().Set("x-clutch-asset-passthrough", "true")
asset, err := a.assetProviderHandler(r.Context(), r.URL.Path)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(fmt.Sprintf("Error getting assets from the configured asset provider: %v", err)))
return
}
defer asset.Close()
_, err = io.Copy(w, asset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(fmt.Sprintf("Error getting assets from the configured asset provider: %v", err)))
return
}
return
}
// If not a known static asset serve the SPA.
r.URL.Path = "/"
} else {
_ = f.Close()
}
a.fileServer.ServeHTTP(w, r)
}
func (a *assetHandler) assetProviderHandler(ctx context.Context, urlPath string) (io.ReadCloser, error) {
switch a.assetCfg.Provider.(type) {
case *gatewayv1.Assets_S3:
aws, err := getAssetProviderService(a.assetCfg)
if err != nil {
return nil, err
}
awsClient, ok := aws.(awsservice.Client)
if !ok {
return nil, fmt.Errorf("Unable to aquire the aws client")
}
return awsClient.S3StreamingGet(
ctx,
a.assetCfg.GetS3().Region,
a.assetCfg.GetS3().Bucket,
path.Join(a.assetCfg.GetS3().Key, strings.TrimPrefix(urlPath, "/static")),
)
default:
return nil, fmt.Errorf("configured asset provider has not been implemented")
}
}
// getAssetProviderService is used in two different contexts
// It's invoked in the mux constructor, which checks if the necessary service has been configured
// when there is an asset provider that requires one.
//
// Otherwise it's used to get the service for an asset provider in assetProviderHandler() if necessary.
func getAssetProviderService(assetCfg *gatewayv1.Assets) (service.Service, error) {
switch assetCfg.Provider.(type) {
case *gatewayv1.Assets_S3:
aws, ok := service.Registry[awsservice.Name]
if !ok {
return nil, fmt.Errorf("The AWS service must be configured to use the asset s3 provider.")
}
return aws, nil
default:
// An asset provider does not necessarily require a service to function properly
// if there is nothing configured for a provider type we cant necessarily throw an error here.
return nil, nil
}
}
func customResponseForwarder(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
md, ok := runtime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
if cookies := md.HeaderMD.Get("Set-Cookie-Token"); len(cookies) > 0 {
cookie := &http.Cookie{
Name: "token",
Value: cookies[0],
Path: "/",
HttpOnly: false,
}
http.SetCookie(w, cookie)
}
if redirects := md.HeaderMD.Get("Location"); len(redirects) > 0 {
w.Header().Set("Location", redirects[0])
code := http.StatusFound
if st := md.HeaderMD.Get("Location-Status"); len(st) > 0 {
headerCodeOverride, err := strconv.Atoi(st[0])
if err != nil {
return err
}
code = headerCodeOverride
}
w.WriteHeader(code)
}
return nil
}
func customHeaderMatcher(key string) (string, bool) {
key = textproto.CanonicalMIMEHeaderKey(key)
if strings.HasPrefix(key, xHeader) {
// exclude handling these headers as they are looked up by grpc's annotate context flow and added to the context
// metadata if they're not found
if key != xForwardedFor && key != xForwardedHost {
return runtime.MetadataPrefix + key, true
}
}
	// fall back to the default header mapping rule
return runtime.DefaultHeaderMatcher(key)
}
func customErrorHandler(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, req *http.Request, err error) {
// TODO(maybe): once we have non-browser clients we probably want to avoid the redirect and directly return the error.
if s, ok := status.FromError(err); ok && s.Code() == codes.Unauthenticated {
referer := req.Referer()
redirectPath := "/v1/authn/login"
if len(referer) != 0 {
referer, err := url.Parse(referer)
if err != nil {
runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, req, err)
return
}
if redirectPath != referer.Path {
redirectPath = fmt.Sprintf("%s?redirect_url=%s", redirectPath, referer.Path)
}
}
http.Redirect(w, req, redirectPath, http.StatusFound)
return
}
runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, req, err)
}
func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, gatewayCfg *gatewayv1.GatewayOptions) (*Mux, error) {
grpcServer := grpc.NewServer(grpc.ChainUnaryInterceptor(unaryInterceptors...))
jsonGateway := runtime.NewServeMux(
runtime.WithForwardResponseOption(customResponseForwarder),
runtime.WithErrorHandler(customErrorHandler),
runtime.WithMarshalerOption(
runtime.MIMEWildcard,
&runtime.JSONPb{
MarshalOptions: protojson.MarshalOptions{
// Use camelCase for the JSON version.
UseProtoNames: false,
// Transmit zero-values over the wire.
EmitUnpopulated: true,
},
UnmarshalOptions: protojson.UnmarshalOptions{},
},
),
runtime.WithIncomingHeaderMatcher(customHeaderMatcher),
)
// If there is a configured asset provider, we check to see if the service is configured before proceeding.
// Bailing out early during the startup process instead of hitting this error at runtime when serving assets.
if gatewayCfg.Assets != nil && gatewayCfg.Assets.Provider != nil {
_, err := getAssetProviderService(gatewayCfg.Assets)
if err != nil {
return nil, err
}
}
httpMux := http.NewServeMux()
httpMux.Handle("/", &assetHandler{
assetCfg: gatewayCfg.Assets,
next: jsonGateway,
fileSystem: assets,
fileServer: http.FileServer(assets),
})
if gatewayCfg.EnablePprof {
httpMux.HandleFunc("/debug/pprof/", pprof.Index)
}
mux := &Mux{
GRPCServer: grpcServer,
JSONGateway: jsonGateway,
HTTPMux: httpMux,
}
return mux, nil
}
// Mux allows sharing one port between gRPC and the corresponding JSON gateway via header-based multiplexing.
type Mux struct {
// Create empty handlers for gRPC and grpc-gateway (JSON) traffic.
JSONGateway *runtime.ServeMux
HTTPMux http.Handler
GRPCServer *grpc.Server
}
// Adapted from https://github.com/grpc/grpc-go/blob/197c621/server.go#L760-L778.
func (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
m.GRPCServer.ServeHTTP(w, r)
} else {
m.HTTPMux.ServeHTTP(w, r)
}
}
func (m *Mux) EnableGRPCReflection() {
reflection.Register(m.GRPCServer)
}
// "h2c" is the unencrypted form of HTTP/2.
func InsecureHandler(handler http.Handler) http.Handler {
return h2c.NewHandler(handler, &http2.Server{})
}
| 1 | 11,174 | super nit: I think this might be easier to read as `requestHeadersFromResponseWriter` | lyft-clutch | go |
@@ -4,12 +4,8 @@
* (found in the LICENSE.Apache file in the root directory)
*/
#include <gtest/gtest.h>
-#include <cstdlib>
-#include <thread>
-#include <mutex>
-#include <atomic>
-#include "common/concurrent/Barrier.h"
-#include "common/thread/GenericThreadPool.h"
+#include "concurrent/Barrier.h"
+#include "thread/GenericThreadPool.h"
namespace vesoft {
namespace concurrent { | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include <gtest/gtest.h>
#include <cstdlib>
#include <thread>
#include <mutex>
#include <atomic>
#include "common/concurrent/Barrier.h"
#include "common/thread/GenericThreadPool.h"
namespace vesoft {
namespace concurrent {
TEST(BarrierTest, BasicTest) {
// test for invalid initial counter
{
ASSERT_THROW({Barrier barrier(0UL);}, std::invalid_argument);
}
// test for single-thread normal case
{
Barrier barrier(1UL);
barrier.wait();
ASSERT_TRUE(true);
}
// test for multiple-thread normal case
{
Barrier barrier(2UL);
std::atomic<size_t> counter{0};
auto cb = [&] () {
barrier.wait();
++counter;
};
std::thread thread(cb);
usleep(1000);
ASSERT_EQ(0UL, counter.load());
barrier.wait();
thread.join();
ASSERT_EQ(1UL, counter.load());
}
// test for multiple-thread completion
{
std::atomic<size_t> counter{0};
auto completion = [&] () {
++counter;
++counter;
};
Barrier barrier(2UL, completion);
auto cb = [&] () {
barrier.wait();
++counter;
};
std::thread thread(cb);
usleep(1000);
ASSERT_EQ(0UL, counter.load());
barrier.wait();
ASSERT_GE(counter.load(), 2UL);
thread.join();
ASSERT_EQ(3UL, counter.load());
}
}
TEST(BarrierTest, ConsecutiveTest) {
std::atomic<size_t> counter{0};
constexpr auto N = 64UL;
constexpr auto iters = 100UL;
auto completion = [&] () {
        // At the completion phase, `counter' should be a multiple of `N'.
ASSERT_EQ(0UL, counter.load() % N);
};
Barrier barrier(N, completion);
auto cb = [&] () {
auto i = iters;
while (i-- != 0) {
++counter;
barrier.wait();
}
};
std::vector<std::thread> threads;
for (auto i = 0UL; i < N; i++) {
threads.emplace_back(cb);
}
for (auto &thread : threads) {
thread.join();
}
ASSERT_EQ(0UL, counter.load() % N);
}
} // namespace concurrent
} // namespace vesoft
| 1 | 13,975 | Need to include "common/base/Base.h" in front of this line | vesoft-inc-nebula | cpp |
@@ -1,6 +1,5 @@
import getRole from '../aria/get-role';
-import matches from '../matches/matches';
-import nativeElementType from './native-element-type';
+import getElementSpec from '../standards/get-element-spec';
import nativeTextMethods from './native-text-methods';
/** | 1 | import getRole from '../aria/get-role';
import matches from '../matches/matches';
import nativeElementType from './native-element-type';
import nativeTextMethods from './native-text-methods';
/**
* Get the accessible text using native HTML methods only
* @param {VirtualNode} element
* @param {Object} context
* @property {Bool} debug Enable logging for formControlValue
* @return {String} Accessible text
*/
function nativeTextAlternative(virtualNode, context = {}) {
const { actualNode } = virtualNode;
if (
actualNode.nodeType !== 1 ||
['presentation', 'none'].includes(getRole(virtualNode))
) {
return '';
}
const textMethods = findTextMethods(virtualNode);
// Find the first step that returns a non-empty string
let accName = textMethods.reduce((accName, step) => {
return accName || step(virtualNode, context);
}, '');
if (context.debug) {
axe.log(accName || '{empty-value}', actualNode, context);
}
return accName;
}
/**
* Get accessible text functions for a specific native HTML element
* @private
* @param {VirtualNode} element
* @return {Function[]} Array of native accessible name computation methods
*/
function findTextMethods(virtualNode) {
const nativeType = nativeElementType.find(type => {
return matches(virtualNode, type.matches);
});
// Use concat because namingMethods can be a string or an array of strings
const methods = nativeType ? [].concat(nativeType.namingMethods) : [];
return methods.map(methodName => nativeTextMethods[methodName]);
}
export default nativeTextAlternative;
| 1 | 15,747 | Changes in this file should go into a separate PR. | dequelabs-axe-core | js |
@@ -243,10 +243,15 @@ public class PhpSurfaceNamer extends SurfaceNamer {
if (token.equals(InitFieldConfig.RANDOM_TOKEN)) {
stringParts.add("time()");
} else {
- stringParts.add("\"" + token + "\"");
+ stringParts.add(quoted(token));
}
}
}
return Joiner.on(". ").join(stringParts);
}
+
+ @Override
+ public String quoted(String text) {
+ return "\'" + text + "\'";
+ }
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.php;
import com.google.api.codegen.ServiceMessages;
import com.google.api.codegen.config.GapicInterfaceConfig;
import com.google.api.codegen.config.GapicMethodConfig;
import com.google.api.codegen.config.InterfaceConfig;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.config.VisibilityConfig;
import com.google.api.codegen.metacode.InitFieldConfig;
import com.google.api.codegen.transformer.ModelTypeFormatterImpl;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NamePath;
import com.google.api.codegen.util.php.PhpCommentReformatter;
import com.google.api.codegen.util.php.PhpNameFormatter;
import com.google.api.codegen.util.php.PhpPackageUtil;
import com.google.api.codegen.util.php.PhpTypeTable;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.base.Joiner;
import java.io.File;
import java.util.ArrayList;
/** The SurfaceNamer for PHP. */
public class PhpSurfaceNamer extends SurfaceNamer {
public PhpSurfaceNamer(String packageName) {
super(
new PhpNameFormatter(),
new ModelTypeFormatterImpl(new PhpModelTypeNameConverter(packageName)),
new PhpTypeTable(packageName),
new PhpCommentReformatter(),
packageName,
packageName);
}
@Override
public SurfaceNamer cloneWithPackageName(String packageName) {
return new PhpSurfaceNamer(packageName);
}
@Override
public String getLroApiMethodName(Method method, VisibilityConfig visibility) {
return getApiMethodName(method, visibility);
}
@Override
public String getFieldSetFunctionName(TypeRef type, Name identifier) {
return publicMethodName(Name.from("set").join(identifier));
}
@Override
public String getFieldGetFunctionName(TypeRef type, Name identifier) {
return publicMethodName(Name.from("get").join(identifier));
}
/** The function name to format the entity for the given collection. */
@Override
public String getFormatFunctionName(
Interface apiInterface, SingleResourceNameConfig resourceNameConfig) {
return publicMethodName(Name.from(resourceNameConfig.getEntityName(), "name"));
}
@Override
public String getPathTemplateName(
Interface apiInterface, SingleResourceNameConfig resourceNameConfig) {
return inittedConstantName(Name.from(resourceNameConfig.getEntityName(), "name", "template"));
}
@Override
public String getClientConfigPath(Interface apiInterface) {
return "../resources/"
+ Name.upperCamel(apiInterface.getSimpleName()).join("client_config").toLowerUnderscore()
+ ".json";
}
@Override
public boolean shouldImportRequestObjectParamType(Field field) {
return field.getType().isMap();
}
@Override
public String getRetrySettingsTypeName() {
return "\\Google\\GAX\\RetrySettings";
}
@Override
public String getOptionalArrayTypeName() {
return "array";
}
@Override
public String getDynamicLangReturnTypeName(Method method, GapicMethodConfig methodConfig) {
if (new ServiceMessages().isEmptyType(method.getOutputType())) {
return "";
}
if (methodConfig.isPageStreaming()) {
return "\\Google\\GAX\\PagedListResponse";
}
if (methodConfig.isLongRunningOperation()) {
return "\\Google\\GAX\\OperationResponse";
}
switch (methodConfig.getGrpcStreamingType()) {
case NonStreaming:
return getModelTypeFormatter().getFullNameFor(method.getOutputType());
case BidiStreaming:
return "\\Google\\GAX\\BidiStream";
case ClientStreaming:
return "\\Google\\GAX\\ClientStream";
case ServerStreaming:
return "\\Google\\GAX\\ServerStream";
default:
return getNotImplementedString(
"SurfaceNamer.getDynamicReturnTypeName grpcStreamingType:"
+ methodConfig.getGrpcStreamingType().toString());
}
}
@Override
public String getFullyQualifiedApiWrapperClassName(GapicInterfaceConfig interfaceConfig) {
return getPackageName() + "\\" + getApiWrapperClassName(interfaceConfig);
}
@Override
public String getApiWrapperClassImplName(InterfaceConfig interfaceConfig) {
return publicClassName(Name.upperCamel(getInterfaceName(interfaceConfig), "GapicClient"));
}
@Override
public String getGrpcClientTypeName(Interface apiInterface) {
return qualifiedName(getGrpcClientTypeName(apiInterface, "GrpcClient"));
}
private NamePath getGrpcClientTypeName(Interface apiInterface, String suffix) {
NamePath namePath =
getTypeNameConverter().getNamePath(getModelTypeFormatter().getFullNameFor(apiInterface));
String publicClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), suffix));
return namePath.withHead(publicClassName);
}
@Override
public String getLongRunningOperationTypeName(ModelTypeTable typeTable, TypeRef type) {
return typeTable.getAndSaveNicknameFor(type);
}
@Override
public String getRequestTypeName(ModelTypeTable typeTable, TypeRef type) {
return typeTable.getAndSaveNicknameFor(type);
}
@Override
public String getGrpcStubCallString(Interface apiInterface, Method method) {
return '/' + apiInterface.getFullName() + '/' + getGrpcMethodName(method);
}
@Override
public String getGapicImplNamespace() {
return PhpPackageUtil.buildPackageName(getPackageName(), "Gapic");
}
@Override
public String getTestPackageName(TestKind testKind) {
return getTestPackageName(getPackageName(), testKind);
}
/** Insert "Tests" into the package name after "Google\Cloud" standard prefix */
private static String getTestPackageName(String packageName, TestKind testKind) {
final String[] PACKAGE_PREFIX = PhpPackageUtil.getStandardPackagePrefix();
ArrayList<String> packageComponents = new ArrayList<>();
String[] packageSplit = PhpPackageUtil.splitPackageName(packageName);
int packageStartIndex = 0;
for (int i = 0; i < PACKAGE_PREFIX.length && i < packageSplit.length; i++) {
if (packageSplit[i].equals(PACKAGE_PREFIX[i])) {
packageStartIndex++;
} else {
break;
}
}
for (int i = 0; i < packageStartIndex; i++) {
packageComponents.add(packageSplit[i]);
}
packageComponents.add("Tests");
switch (testKind) {
case UNIT:
packageComponents.add("Unit");
break;
case SYSTEM:
packageComponents.add("System");
break;
}
for (int i = packageStartIndex; i < packageSplit.length; i++) {
packageComponents.add(packageSplit[i]);
}
return PhpPackageUtil.buildPackageName(packageComponents);
}
@Override
public boolean methodHasRetrySettings(GapicMethodConfig methodConfig) {
return !methodConfig.isGrpcStreaming();
}
@Override
public boolean methodHasTimeoutSettings(GapicMethodConfig methodConfig) {
return methodConfig.isGrpcStreaming();
}
@Override
public String getSourceFilePath(String path, String className) {
return path + File.separator + className + ".php";
}
@Override
public String injectRandomStringGeneratorCode(String randomString) {
String delimiter = ",";
String[] split =
CommonRenderingUtil.stripQuotes(randomString)
.replace(
InitFieldConfig.RANDOM_TOKEN, delimiter + InitFieldConfig.RANDOM_TOKEN + delimiter)
.split(delimiter);
ArrayList<String> stringParts = new ArrayList<>();
for (String token : split) {
if (token.length() > 0) {
if (token.equals(InitFieldConfig.RANDOM_TOKEN)) {
stringParts.add("time()");
} else {
stringParts.add("\"" + token + "\"");
}
}
}
return Joiner.on(". ").join(stringParts);
}
}
| 1 | 23,996 | Please ensure that this will not start putting single quotes where double quotes are expected. | googleapis-gapic-generator | java |
@@ -68,7 +68,7 @@ type Manager struct {
}
// ProvideConfig provides the config for consumer
-func (manager *Manager) ProvideConfig(publicKey json.RawMessage, pingerPort func(int) int) (session.ServiceConfiguration, session.DestroyCallback, error) {
+func (manager *Manager) ProvideConfig(publicKey json.RawMessage, pingerPort func(int, int) int) (session.ServiceConfiguration, session.DestroyCallback, error) {
key := &wg.ConsumerConfig{}
err := json.Unmarshal(publicKey, key)
if err != nil { | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package service
import (
"encoding/json"
"sync"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/firewall"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/nat"
wg "github.com/mysteriumnetwork/node/services/wireguard"
"github.com/mysteriumnetwork/node/services/wireguard/endpoint"
"github.com/mysteriumnetwork/node/services/wireguard/resources"
"github.com/mysteriumnetwork/node/session"
"github.com/pkg/errors"
)
// NewManager creates new instance of Wireguard service
func NewManager(
ipResolver ip.Resolver,
natService nat.NATService,
portMap func(port int) (releasePortMapping func()),
options Options,
portSupplier port.ServicePortSupplier,
) *Manager {
resourceAllocator := resources.NewAllocator(portSupplier, options.Subnet)
return &Manager{
natService: natService,
resourceAllocator: resourceAllocator,
portMap: portMap,
ipResolver: ipResolver,
options: options,
}
}
// Manager represents an instance of Wireguard service
type Manager struct {
wg sync.WaitGroup
natService nat.NATService
connectionEndpoint wg.ConnectionEndpoint
resourceAllocator *resources.Allocator
ipResolver ip.Resolver
portMap func(port int) (releasePortMapping func())
options Options
}
// ProvideConfig provides the config for consumer
func (manager *Manager) ProvideConfig(publicKey json.RawMessage, pingerPort func(int) int) (session.ServiceConfiguration, session.DestroyCallback, error) {
key := &wg.ConsumerConfig{}
err := json.Unmarshal(publicKey, key)
if err != nil {
return nil, nil, err
}
config, err := manager.connectionEndpoint.Config()
if err != nil {
return nil, nil, err
}
if err := manager.connectionEndpoint.AddPeer(key.PublicKey, nil, config.Consumer.IPAddress.IP.String()+"/32"); err != nil {
return nil, nil, err
}
destroy := func() {
if err := manager.resourceAllocator.ReleaseIPNet(config.Consumer.IPAddress); err != nil {
log.Error(logPrefix, "failed to release IP network", err)
}
if err := manager.connectionEndpoint.RemovePeer(key.PublicKey); err != nil {
log.Error(logPrefix, "failed to remove peer: ", key.PublicKey, err)
}
}
return config, destroy, nil
}
// Serve starts service - does block
func (manager *Manager) Serve(providerID identity.Identity) error {
manager.wg.Add(1)
connectionEndpoint, err := endpoint.NewConnectionEndpoint(manager.ipResolver, manager.resourceAllocator, manager.portMap, manager.options.ConnectDelay)
if err != nil {
return err
}
if err := connectionEndpoint.Start(nil); err != nil {
return err
}
outIP, err := manager.ipResolver.GetOutboundIP()
if err != nil {
return err
}
config, err := connectionEndpoint.Config()
if err != nil {
return err
}
if err := firewall.AddInboundRule("UDP", config.Provider.Endpoint.Port); err != nil {
return errors.Wrap(err, "failed to add firewall rule")
}
defer func() {
if err := firewall.RemoveInboundRule("UDP", config.Provider.Endpoint.Port); err != nil {
log.Error(logPrefix, "Failed to delete firewall rule for Wireguard", err)
}
}()
natRule := nat.RuleForwarding{SourceAddress: config.Consumer.IPAddress.String(), TargetIP: outIP}
if err := manager.natService.Add(natRule); err != nil {
return errors.Wrap(err, "failed to add NAT forwarding rule")
}
manager.connectionEndpoint = connectionEndpoint
log.Info(logPrefix, "Wireguard service started successfully")
manager.wg.Wait()
return nil
}
// Stop stops service.
func (manager *Manager) Stop() error {
manager.wg.Done()
manager.connectionEndpoint.Stop()
log.Info(logPrefix, "Wireguard service stopped")
return nil
}
| 1 | 14,282 | `ProvideConfig` was changed, looks like it will not compile for windows, and should be changed too. | mysteriumnetwork-node | go |
@@ -76,9 +76,8 @@ func Search(ctx *context.APIContext) {
})
}
-// https://github.com/gogits/go-gogs-client/wiki/Repositories#list-your-repositories
-func ListMyRepos(ctx *context.APIContext) {
- ownRepos, err := models.GetUserRepositories(ctx.User.ID, true, 1, ctx.User.NumRepos)
+func listUserRepos(ctx *context.APIContext, u *models.User) {
+ ownRepos, err := models.GetUserRepositories(u.ID, true, 1, u.NumRepos)
if err != nil {
ctx.Error(500, "GetRepositories", err)
return | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"path"
api "github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/context"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
"github.com/gogits/gogs/routers/api/v1/convert"
)
// https://github.com/gogits/go-gogs-client/wiki/Repositories#search-repositories
func Search(ctx *context.APIContext) {
opts := &models.SearchRepoOptions{
Keyword: path.Base(ctx.Query("q")),
OwnerID: ctx.QueryInt64("uid"),
PageSize: convert.ToCorrectPageSize(ctx.QueryInt("limit")),
}
// Check visibility.
if ctx.IsSigned && opts.OwnerID > 0 {
if ctx.User.ID == opts.OwnerID {
opts.Private = true
} else {
u, err := models.GetUserByID(opts.OwnerID)
if err != nil {
ctx.JSON(500, map[string]interface{}{
"ok": false,
"error": err.Error(),
})
return
}
if u.IsOrganization() && u.IsOwnedBy(ctx.User.ID) {
opts.Private = true
}
// FIXME: how about collaborators?
}
}
repos, count, err := models.SearchRepositoryByName(opts)
if err != nil {
ctx.JSON(500, map[string]interface{}{
"ok": false,
"error": err.Error(),
})
return
}
results := make([]*api.Repository, len(repos))
for i := range repos {
if err = repos[i].GetOwner(); err != nil {
ctx.JSON(500, map[string]interface{}{
"ok": false,
"error": err.Error(),
})
return
}
results[i] = &api.Repository{
ID: repos[i].ID,
FullName: path.Join(repos[i].Owner.Name, repos[i].Name),
}
}
ctx.SetLinkHeader(int(count), setting.API.MaxResponseItems)
ctx.JSON(200, map[string]interface{}{
"ok": true,
"data": results,
})
}
// https://github.com/gogits/go-gogs-client/wiki/Repositories#list-your-repositories
func ListMyRepos(ctx *context.APIContext) {
ownRepos, err := models.GetUserRepositories(ctx.User.ID, true, 1, ctx.User.NumRepos)
if err != nil {
ctx.Error(500, "GetRepositories", err)
return
}
numOwnRepos := len(ownRepos)
accessibleRepos, err := ctx.User.GetRepositoryAccesses()
if err != nil {
ctx.Error(500, "GetRepositoryAccesses", err)
return
}
repos := make([]*api.Repository, numOwnRepos+len(accessibleRepos))
for i := range ownRepos {
repos[i] = ownRepos[i].APIFormat(&api.Permission{true, true, true})
}
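	// Append repositories the user can access through collaborations, along with the granted access level.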
i := numOwnRepos
for repo, access := range accessibleRepos {
repos[i] = repo.APIFormat(&api.Permission{
Admin: access >= models.ACCESS_MODE_ADMIN,
Push: access >= models.ACCESS_MODE_WRITE,
Pull: true,
})
i++
}
ctx.JSON(200, &repos)
}
func CreateUserRepo(ctx *context.APIContext, owner *models.User, opt api.CreateRepoOption) {
repo, err := models.CreateRepository(owner, models.CreateRepoOptions{
Name: opt.Name,
Description: opt.Description,
Gitignores: opt.Gitignores,
License: opt.License,
Readme: opt.Readme,
IsPrivate: opt.Private,
AutoInit: opt.AutoInit,
})
if err != nil {
if models.IsErrRepoAlreadyExist(err) ||
models.IsErrNameReserved(err) ||
models.IsErrNamePatternNotAllowed(err) {
ctx.Error(422, "", err)
} else {
if repo != nil {
if err = models.DeleteRepository(ctx.User.ID, repo.ID); err != nil {
log.Error(4, "DeleteRepository: %v", err)
}
}
ctx.Error(500, "CreateRepository", err)
}
return
}
ctx.JSON(201, repo.APIFormat(&api.Permission{true, true, true}))
}
// https://github.com/gogits/go-gogs-client/wiki/Repositories#create
func Create(ctx *context.APIContext, opt api.CreateRepoOption) {
// Shouldn't reach this condition, but just in case.
if ctx.User.IsOrganization() {
ctx.Error(422, "", "not allowed creating repository for organization")
return
}
CreateUserRepo(ctx, ctx.User, opt)
}
func CreateOrgRepo(ctx *context.APIContext, opt api.CreateRepoOption) {
org, err := models.GetOrgByName(ctx.Params(":org"))
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Error(422, "", err)
} else {
ctx.Error(500, "GetOrgByName", err)
}
return
}
if !org.IsOwnedBy(ctx.User.ID) {
ctx.Error(403, "", "Given user is not owner of organization.")
return
}
CreateUserRepo(ctx, org, opt)
}
// https://github.com/gogits/go-gogs-client/wiki/Repositories#migrate
func Migrate(ctx *context.APIContext, form auth.MigrateRepoForm) {
ctxUser := ctx.User
// Not equal means context user is an organization,
// or is another user/organization if current user is admin.
if form.Uid != ctxUser.ID {
org, err := models.GetUserByID(form.Uid)
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Error(422, "", err)
} else {
ctx.Error(500, "GetUserByID", err)
}
return
}
ctxUser = org
}
if ctx.HasError() {
ctx.Error(422, "", ctx.GetErrMsg())
return
}
if ctxUser.IsOrganization() && !ctx.User.IsAdmin {
// Check ownership of organization.
if !ctxUser.IsOwnedBy(ctx.User.ID) {
ctx.Error(403, "", "Given user is not owner of organization.")
return
}
}
remoteAddr, err := form.ParseRemoteAddr(ctx.User)
if err != nil {
if models.IsErrInvalidCloneAddr(err) {
addrErr := err.(models.ErrInvalidCloneAddr)
switch {
case addrErr.IsURLError:
ctx.Error(422, "", err)
case addrErr.IsPermissionDenied:
ctx.Error(422, "", "You are not allowed to import local repositories.")
case addrErr.IsInvalidPath:
ctx.Error(422, "", "Invalid local path, it does not exist or not a directory.")
default:
ctx.Error(500, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
}
} else {
ctx.Error(500, "ParseRemoteAddr", err)
}
return
}
repo, err := models.MigrateRepository(ctxUser, models.MigrateRepoOptions{
Name: form.RepoName,
Description: form.Description,
IsPrivate: form.Private || setting.Repository.ForcePrivate,
IsMirror: form.Mirror,
RemoteAddr: remoteAddr,
})
if err != nil {
if repo != nil {
if errDelete := models.DeleteRepository(ctxUser.ID, repo.ID); errDelete != nil {
log.Error(4, "DeleteRepository: %v", errDelete)
}
}
ctx.Error(500, "MigrateRepository", models.HandleCloneUserCredentials(err.Error(), true))
return
}
log.Trace("Repository migrated: %s/%s", ctxUser.Name, form.RepoName)
ctx.JSON(201, repo.APIFormat(&api.Permission{true, true, true}))
}
func parseOwnerAndRepo(ctx *context.APIContext) (*models.User, *models.Repository) {
owner, err := models.GetUserByName(ctx.Params(":username"))
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Error(422, "", err)
} else {
ctx.Error(500, "GetUserByName", err)
}
return nil, nil
}
repo, err := models.GetRepositoryByName(owner.ID, ctx.Params(":reponame"))
if err != nil {
if models.IsErrRepoNotExist(err) {
ctx.Status(404)
} else {
ctx.Error(500, "GetRepositoryByName", err)
}
return nil, nil
}
return owner, repo
}
// https://github.com/gogits/go-gogs-client/wiki/Repositories#get
func Get(ctx *context.APIContext) {
_, repo := parseOwnerAndRepo(ctx)
if ctx.Written() {
return
}
ctx.JSON(200, repo.APIFormat(&api.Permission{true, true, true}))
}
// https://github.com/gogits/go-gogs-client/wiki/Repositories#delete
func Delete(ctx *context.APIContext) {
owner, repo := parseOwnerAndRepo(ctx)
if ctx.Written() {
return
}
if owner.IsOrganization() && !owner.IsOwnedBy(ctx.User.ID) {
ctx.Error(403, "", "Given user is not owner of organization.")
return
}
if err := models.DeleteRepository(owner.ID, repo.ID); err != nil {
ctx.Error(500, "DeleteRepository", err)
return
}
log.Trace("Repository deleted: %s/%s", owner.Name, repo.Name)
ctx.Status(204)
}
| 1 | 11,865 | This does not look right, you're listing all private repositories.. | gogs-gogs | go |
@@ -20,4 +20,6 @@ C2::Application.routes.draw do
get 'bookmarklet', to: redirect('bookmarklet.html')
get "/498", :to => "errors#token_authentication_error"
+ match "*path" => "application#xss_options_request", :via => :options
+
end | 1 | C2::Application.routes.draw do
get 'approval_groups/search' => "approval_groups#search"
resources :approval_groups
post 'send_cart' => 'communicarts#send_cart'
post 'approval_reply_received' => 'communicarts#approval_reply_received'
match 'approval_response', to: 'communicarts#approval_response', via: [:get, :put]
root :to => 'home#index'
match "/auth/:provider/callback" => "home#oauth_callback", via: [:get]
post "/logout" => "home#logout"
get 'overlay', to: "overlay#index"
resources :carts do
resources :comments
end
resources :cart_items do
resources :comments
end
get 'bookmarklet', to: redirect('bookmarklet.html')
get "/498", :to => "errors#token_authentication_error"
end
| 1 | 12,022 | An OPTIONS request should respond from _any_ path? Seems weird to me... | 18F-C2 | rb |
@@ -40,6 +40,8 @@ class MediaAdmin extends Admin
->add('category', null, array(
'show_filter' => false,
))
+ ->add('width')
+ ->add('height')
;
$providers = array(); | 1 | <?php
/*
* This file is part of the Sonata package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Admin\ORM;
use Sonata\MediaBundle\Admin\BaseMediaAdmin as Admin;
use Sonata\AdminBundle\Datagrid\DatagridMapper;
class MediaAdmin extends Admin
{
/**
* @param \Sonata\AdminBundle\Datagrid\DatagridMapper $datagridMapper
* @return void
*/
protected function configureDatagridFilters(DatagridMapper $datagridMapper)
{
$options = array(
'choices' => array()
);
foreach ($this->pool->getContexts() as $name => $context) {
$options['choices'][$name] = $name;
}
$datagridMapper
->add('name')
->add('providerReference')
->add('enabled')
->add('context', null, array(
'show_filter' => $this->getPersistentParameter('hide_context') !== true
), 'choice', $options)
->add('category', null, array(
'show_filter' => false,
))
;
$providers = array();
$providerNames = (array) $this->pool->getProviderNamesByContext($this->getPersistentParameter('context', $this->pool->getDefaultContext()));
foreach ($providerNames as $name) {
$providers[$name] = $name;
}
$datagridMapper->add('providerName', 'doctrine_orm_choice', array(
'field_options'=> array(
'choices' => $providers,
'required' => false,
'multiple' => false,
'expanded' => false,
),
'field_type'=> 'choice',
));
}
}
| 1 | 6,454 | you need to add the content type | sonata-project-SonataMediaBundle | php |
@@ -12,6 +12,14 @@ namespace Datadog.Trace
{
private static Task _traceAgentMonitor;
private static Task _dogStatsDMonitor;
+ private static Process _traceAgentProcess;
+ private static Process _dogStatsProcess;
+
+ public static void StopSubProcesses()
+ {
+ SafelyKillProcess(_traceAgentProcess, "Failed to halt the sub-process trace agent");
+ SafelyKillProcess(_dogStatsProcess, "Failed to halt the sub-process stats agent");
+ }
public static void StartStandaloneAgentProcessesWhenConfigured()
{ | 1 | using System;
using System.Diagnostics;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Configuration;
using Datadog.Trace.Logging;
namespace Datadog.Trace
{
internal class TracerSubProcessManager
{
private static Task _traceAgentMonitor;
private static Task _dogStatsDMonitor;
public static void StartStandaloneAgentProcessesWhenConfigured()
{
try
{
var traceAgentPath = Environment.GetEnvironmentVariable(ConfigurationKeys.TraceAgentPath);
if (!string.IsNullOrWhiteSpace(traceAgentPath))
{
var traceProcessArgs = Environment.GetEnvironmentVariable(ConfigurationKeys.TraceAgentArgs);
_traceAgentMonitor = StartProcessWithKeepAlive(traceAgentPath, traceProcessArgs);
}
else
{
DatadogLogging.RegisterStartupLog(log => log.Debug("There is no path configured for {0}.", ConfigurationKeys.TraceAgentPath));
}
var dogStatsDPath = Environment.GetEnvironmentVariable(ConfigurationKeys.DogStatsDPath);
if (!string.IsNullOrWhiteSpace(dogStatsDPath))
{
var dogStatsDArgs = Environment.GetEnvironmentVariable(ConfigurationKeys.DogStatsDArgs);
_dogStatsDMonitor = StartProcessWithKeepAlive(dogStatsDPath, dogStatsDArgs);
}
else
{
DatadogLogging.RegisterStartupLog(log => log.Debug("There is no path configured for {0}.", ConfigurationKeys.DogStatsDPath));
}
}
catch (Exception ex)
{
DatadogLogging.RegisterStartupLog(log => log.Error(ex, "Error when attempting to start standalone agent processes."));
}
}
private static bool ProgramIsRunning(string fullPath)
{
if (string.IsNullOrWhiteSpace(fullPath))
{
return false;
}
var fileName = Path.GetFileNameWithoutExtension(fullPath);
var processesByName = Process.GetProcessesByName(fileName);
if (processesByName?.Length > 0)
{
// We enforce a unique enough naming within contexts where we would use sub-processes
return true;
}
return false;
}
private static Task StartProcessWithKeepAlive(string path, string args)
{
DatadogLogging.RegisterStartupLog(log => log.Debug("Starting keep alive for {0}.", path));
return Task.Run(
() =>
{
try
{
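                        // Give up on restarting the process after this many consecutive failed attempts.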
var circuitBreakerMax = 3;
var sequentialFailures = 0;
while (true)
{
try
{
if (ProgramIsRunning(path))
{
DatadogLogging.RegisterStartupLog(log => log.Debug("{0} is already running.", path));
continue;
}
var startInfo = new ProcessStartInfo { FileName = path };
if (!string.IsNullOrWhiteSpace(args))
{
startInfo.Arguments = args;
}
DatadogLogging.RegisterStartupLog(log => log.Debug("Starting {0}.", path));
var process = Process.Start(startInfo);
Thread.Sleep(150);
if (process == null || process.HasExited)
{
DatadogLogging.RegisterStartupLog(log => log.Error("{0} has failed to start.", path));
sequentialFailures++;
}
else
{
DatadogLogging.RegisterStartupLog(log => log.Debug("Successfully started {0}.", path));
sequentialFailures = 0;
}
}
catch (Exception ex)
{
DatadogLogging.RegisterStartupLog(log => log.Error(ex, "Exception when trying to start an instance of {0}.", path));
sequentialFailures++;
}
finally
{
// Delay for a reasonable amount of time before we check to see if the process is alive again.
Thread.Sleep(20_000);
}
if (sequentialFailures >= circuitBreakerMax)
{
DatadogLogging.RegisterStartupLog(log => log.Error("Circuit breaker triggered for {0}. Max failed retries reached ({1}).", path, sequentialFailures));
break;
}
}
}
finally
{
DatadogLogging.RegisterStartupLog(log => log.Debug("Keep alive is dropping for {0}.", path));
}
});
}
}
}
| 1 | 16,534 | Do we need to distinguish between these two processes? I'm thinking maybe we can have a list of processes and treat them all equally. | DataDog-dd-trace-dotnet | .cs |
@@ -61,8 +61,8 @@ callee_info_t default_callee_info;
int
get_clean_call_switch_stack_size(void)
{
-#ifdef AARCH64
- /* Stack size needs to be 16 byte aligned on ARM */
+#if defined(AARCH64) || defined(X64)
+ /* Stack size needs to be 16 byte aligned on ARM and x64. */
return ALIGN_FORWARD(sizeof(priv_mcontext_t), 16);
#else
return sizeof(priv_mcontext_t); | 1 | /* ******************************************************************************
* Copyright (c) 2010-2017 Google, Inc. All rights reserved.
* Copyright (c) 2010 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "mangle_shared.c" */
#include "../globals.h"
#include "arch.h"
#include "instr_create.h"
#include "instrument.h" /* for insert_get_mcontext_base */
#include "decode_fast.h" /* for decode_next_pc */
#ifdef ANNOTATIONS
# include "../annotations.h"
#endif
/* Make code more readable by shortening long lines.
* We mark everything we add as non-app instr.
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
clean_call_info_t default_clean_call_info;
callee_info_t default_callee_info;
/* the stack size of a full context switch for clean call */
int
get_clean_call_switch_stack_size(void)
{
#ifdef AARCH64
/* Stack size needs to be 16 byte aligned on ARM */
return ALIGN_FORWARD(sizeof(priv_mcontext_t), 16);
#else
return sizeof(priv_mcontext_t);
#endif
}
/* extra temporarily-used stack usage beyond
* get_clean_call_switch_stack_size()
*/
int
get_clean_call_temp_stack_size(void)
{
return XSP_SZ; /* for eflags clear code: push 0; popf */
}
/* utility routines for inserting clean calls to an instrumentation routine
* strategy is very similar to fcache_enter/return
* FIXME: try to share code with fcache_enter/return?
*
* first swap stacks to DynamoRIO stack:
* SAVE_TO_UPCONTEXT %xsp,xsp_OFFSET
* RESTORE_FROM_DCONTEXT dstack_OFFSET,%xsp
* swap peb/teb fields
* now save app eflags and registers, being sure to lay them out on
* the stack in priv_mcontext_t order:
* push $0 # for priv_mcontext_t.pc; wasted, for now
* pushf
* pusha # xsp is dstack-XSP_SZ*2; rest are app values
* clear the eflags for our usage
* ASSUMPTION (also made in x86.asm): 0 ok, reserved bits are not set by popf,
* and clearing, not preserving, is good enough
* push $0
* popf
* make the call
* call routine
* restore app regs and eflags
* popa
* popf
* lea XSP_SZ(xsp),xsp # clear priv_mcontext_t.pc slot
* swap peb/teb fields
* restore app stack
* RESTORE_FROM_UPCONTEXT xsp_OFFSET,%xsp
*/
void
insert_get_mcontext_base(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg)
{
PRE(ilist, where, instr_create_restore_from_tls
(dcontext, reg, TLS_DCONTEXT_SLOT));
/* An extra level of indirection with SELFPROT_DCONTEXT */
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
ASSERT_NOT_TESTED();
PRE(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
OPND_CREATE_MEMPTR(reg, offsetof(dcontext_t, upcontext))));
}
}
/* prepare_for and cleanup_after assume that the stack looks the same after
* the call to the instrumentation routine, since it stores the app state
* on the stack.
* Returns the size of the data stored on the DR stack.
* WARNING: this routine does NOT save the fp/mmx/sse state, to do that the
* instrumentation routine should call proc_save_fpstate() and then
* proc_restore_fpstate()
* (This is because of expense:
* fsave takes 118 cycles!
* frstor (separated by 6 instrs from fsave) takes 89 cycles
* fxsave and fxrstor are not available on HP machine!
* supposedly they came out in PII
* on balrog: fxsave 91 cycles, fxrstor 173)
*
* For x64, changes the stack pointer by a multiple of 16.
*
* NOTE: The client interface's get/set mcontext functions and the
* hotpatching gateway rely on the app's context being available
* on the dstack in a particular format. Do not corrupt this data
* unless you update all users of this data!
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot.
* We guarantee to clients that all other slots (except the XAX mcontext slot)
* will remain untouched.
*
* N.B.: insert_parameter_preparation (and our documentation for
* dr_prepare_for_call) assumes that this routine only modifies xsp
* and xax and no other registers.
*/
/* number of extra slots in addition to register slots. */
#define NUM_EXTRA_SLOTS 2 /* pc, aflags */
uint
prepare_for_clean_call(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr)
{
uint dstack_offs = 0;
if (cci == NULL)
cci = &default_clean_call_info;
/* Swap stacks. For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here. Save
* away xax in a TLS slot and then load the dcontext there.
*/
if (SCRATCH_ALWAYS_TLS()) {
PRE(ilist, instr, instr_create_save_to_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, instr, SCRATCH_REG0);
#ifdef AARCH64
        /* We need an additional scratch register for saving the SP.
* TLS_REG1_SLOT is not safe since it may be used by clients.
* Instead we save it to dcontext.mcontext.x0, which is not
* used by dr_save_reg (see definition of SPILL_SLOT_MC_REG).
*/
PRE(ilist, instr,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0),
opnd_create_reg(SCRATCH_REG1)));
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(DR_REG_XSP)));
PRE(ilist, instr,
XINST_CREATE_store(dcontext,
opnd_create_dcontext_field_via_reg_sz
(dcontext, SCRATCH_REG0,
XSP_OFFSET, OPSZ_PTR),
opnd_create_reg(SCRATCH_REG1)));
#else
PRE(ilist, instr, instr_create_save_to_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET));
#endif
/* DSTACK_OFFSET isn't within the upcontext so if it's separate this won't
* work right. FIXME - the dcontext accessing routines are a mess of shared
* vs. no shared support, separate context vs. no separate context support etc. */
ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask));
#ifdef WINDOWS
/* i#249: swap PEB pointers while we have dcxt in reg. We risk "silent
* death" by using xsp as scratch but don't have simple alternative.
* We don't support non-SCRATCH_ALWAYS_TLS.
*/
/* XXX: should use clean callee analysis to remove pieces of this
* such as errno preservation
*/
if (!cci->out_of_line_swap) {
preinsert_swap_peb(dcontext, ilist, instr, !SCRATCH_ALWAYS_TLS(),
REG_XAX/*dc*/, REG_XSP/*scratch*/, true/*to priv*/);
}
#endif
#ifdef AARCH64
PRE(ilist, instr,
XINST_CREATE_load(dcontext,
opnd_create_reg(SCRATCH_REG1),
opnd_create_dcontext_field_via_reg_sz
(dcontext, SCRATCH_REG0,
DSTACK_OFFSET, OPSZ_PTR)));
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_reg(SCRATCH_REG1)));
/* Restore scratch_reg from dcontext.mcontext.x0. */
PRE(ilist, instr,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0)));
#else
PRE(ilist, instr, instr_create_restore_from_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, DSTACK_OFFSET));
#endif
/* Restore SCRATCH_REG0 before pushing the context on the dstack. */
PRE(ilist, instr, instr_create_restore_from_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
}
else {
IF_AARCH64(ASSERT_NOT_REACHED());
PRE(ilist, instr, instr_create_save_to_dcontext(dcontext, REG_XSP, XSP_OFFSET));
#ifdef WINDOWS
if (!cci->out_of_line_swap) {
preinsert_swap_peb(dcontext, ilist, instr, !SCRATCH_ALWAYS_TLS(),
REG_XAX/*unused*/, REG_XSP/*scratch*/, true/*to priv*/);
}
#endif
PRE(ilist, instr, instr_create_restore_dynamo_stack(dcontext));
}
/* Save flags and all registers, in priv_mcontext_t order.
* We're at base of dstack so should be nicely aligned.
*/
ASSERT(ALIGNED(dcontext->dstack, PAGE_SIZE));
/* Note that we do NOT bother to put the correct pre-push app xsp value on the
* stack here, as an optimization for callees who never ask for it: instead we
* rely on dr_[gs]et_mcontext() to fix it up if asked for. We can get away w/
* this while hotpatching cannot (hotp_inject_gateway_call() fixes it up every
* time) b/c the callee has to ask for the priv_mcontext_t.
*/
if (cci->out_of_line_swap) {
dstack_offs +=
insert_out_of_line_context_switch(dcontext, ilist, instr, true);
} else {
dstack_offs +=
insert_push_all_registers(dcontext, cci, ilist, instr, (uint)PAGE_SIZE,
OPND_CREATE_INT32(0), REG_NULL
_IF_AARCH64(false));
insert_clear_eflags(dcontext, cci, ilist, instr);
/* XXX: add a cci field for optimizing this away if callee makes no calls */
}
/* We no longer need to preserve the app's errno on Windows except
* when using private libraries, so its preservation is in
* preinsert_swap_peb().
* We do not need to preserve DR's Linux errno across app execution.
*/
#if (defined(X86) && defined(X64)) || defined(MACOS)
/* PR 218790: maintain 16-byte rsp alignment.
* insert_parameter_preparation() currently assumes we leave rsp aligned.
*/
/* check if need adjust stack for alignment. */
if (cci->should_align) {
uint num_slots = NUM_GP_REGS + NUM_EXTRA_SLOTS;
if (cci->skip_save_flags)
num_slots -= 2;
num_slots -= cci->num_regs_skip; /* regs that not saved */
if ((num_slots % 2) == 1) {
ASSERT((dstack_offs % 16) == 8);
PRE(ilist, instr, INSTR_CREATE_lea
(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -(int)XSP_SZ)));
dstack_offs += XSP_SZ;
} else {
ASSERT((dstack_offs % 16) == 0);
}
}
#endif
ASSERT(cci->skip_save_flags ||
cci->num_simd_skip != 0 ||
cci->num_regs_skip != 0 ||
dstack_offs == sizeof(priv_mcontext_t) + clean_call_beyond_mcontext());
return dstack_offs;
}
void
cleanup_after_clean_call(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr)
{
if (cci == NULL)
cci = &default_clean_call_info;
/* saved error code is currently on the top of the stack */
#if (defined(X86) && defined(X64)) || defined(MACOS)
/* PR 218790: remove the padding we added for 16-byte rsp alignment */
if (cci->should_align) {
uint num_slots = NUM_GP_REGS + NUM_EXTRA_SLOTS;
if (cci->skip_save_flags)
num_slots += 2;
num_slots -= cci->num_regs_skip; /* regs that not saved */
if ((num_slots % 2) == 1) {
PRE(ilist, instr, INSTR_CREATE_lea
(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, XSP_SZ)));
}
}
#endif
/* now restore everything */
if (cci->out_of_line_swap) {
insert_out_of_line_context_switch(dcontext, ilist, instr, false);
} else {
/* XXX: add a cci field for optimizing this away if callee makes no calls */
insert_pop_all_registers(dcontext, cci, ilist, instr,
/* see notes in prepare_for_clean_call() */
(uint)PAGE_SIZE _IF_AARCH64(false));
}
/* Swap stacks back. For thread-shared, we need to get the dcontext
* dynamically. Save xax in TLS so we can use it as scratch.
*/
if (SCRATCH_ALWAYS_TLS()) {
PRE(ilist, instr, instr_create_save_to_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, instr, SCRATCH_REG0);
#ifdef WINDOWS
/* i#249: swap PEB pointers while we have dcxt in reg. We risk "silent
* death" by using xsp as scratch but don't have simple alternative.
* We don't support non-SCRATCH_ALWAYS_TLS.
*/
if (!cci->out_of_line_swap) {
preinsert_swap_peb(dcontext, ilist, instr, !SCRATCH_ALWAYS_TLS(),
REG_XAX/*dc*/, REG_XSP/*scratch*/, false/*to app*/);
}
#endif
#ifdef AARCH64
/* TLS_REG1_SLOT is not safe since it may be used by clients.
* We save it to dcontext.mcontext.x0.
*/
PRE(ilist, instr,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0),
opnd_create_reg(SCRATCH_REG1)));
PRE(ilist, instr,
XINST_CREATE_load(dcontext,
opnd_create_reg(SCRATCH_REG1),
opnd_create_dcontext_field_via_reg_sz
(dcontext, SCRATCH_REG0,
XSP_OFFSET, OPSZ_PTR)));
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_reg(SCRATCH_REG1)));
/* Restore scratch_reg from dcontext.mcontext.x0. */
PRE(ilist, instr,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0)));
#else
PRE(ilist, instr, instr_create_restore_from_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET));
#endif
PRE(ilist, instr, instr_create_restore_from_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
}
else {
IF_AARCH64(ASSERT_NOT_REACHED());
#ifdef WINDOWS
if (!cci->out_of_line_swap) {
preinsert_swap_peb(dcontext, ilist, instr, !SCRATCH_ALWAYS_TLS(),
REG_XAX/*unused*/, REG_XSP/*scratch*/, false/*to app*/);
}
#endif
PRE(ilist, instr,
instr_create_restore_from_dcontext(dcontext, REG_XSP, XSP_OFFSET));
}
}
bool
parameters_stack_padded(void)
{
return (REGPARM_MINSTACK > 0 || REGPARM_END_ALIGN > XSP_SZ);
}
/* Inserts a complete call to callee with the passed-in arguments.
* For x64, assumes the stack pointer is currently 16-byte aligned.
* Clean calls ensure this by using clean base of dstack and having
* dr_prepare_for_call pad to 16 bytes.
* Returns whether the call is direct.
*/
bool
insert_meta_call_vargs(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
meta_call_flags_t flags, byte *encode_pc, void *callee,
uint num_args, opnd_t *args)
{
instr_t *in = (instr == NULL) ? instrlist_last(ilist) : instr_get_prev(instr);
bool direct;
uint stack_for_params =
insert_parameter_preparation(dcontext, ilist, instr,
TEST(META_CALL_CLEAN, flags), num_args, args);
IF_X64(ASSERT(ALIGNED(stack_for_params, 16)));
#ifdef CLIENT_INTERFACE
if (TEST(META_CALL_CLEAN, flags) && DYNAMO_OPTION(profile_pcs)) {
if (SCRATCH_ALWAYS_TLS()) {
/* SCRATCH_REG0 is dead here, because clean calls only support "cdecl",
* which specifies that the caller must save xax (and xcx and xdx)
*/
insert_get_mcontext_base(dcontext, ilist, instr, SCRATCH_REG0);
# ifdef AARCH64
/* TLS_REG1_SLOT is not safe since it may be used by clients.
* We save it to dcontext.mcontext.x0.
*/
PRE(ilist, instr,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0),
opnd_create_reg(SCRATCH_REG1)));
instrlist_insert_mov_immed_ptrsz(dcontext, (ptr_int_t)WHERE_CLEAN_CALLEE,
opnd_create_reg(SCRATCH_REG1),
ilist, instr, NULL, NULL);
PRE(ilist, instr,
instr_create_save_to_dc_via_reg(dcontext, SCRATCH_REG0, SCRATCH_REG1,
WHEREAMI_OFFSET));
/* Restore scratch_reg from dcontext.mcontext.x0. */
PRE(ilist, instr,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0)));
# else
PRE(ilist, instr,
instr_create_save_immed_to_dc_via_reg(dcontext, SCRATCH_REG0,
WHEREAMI_OFFSET,
(uint) WHERE_CLEAN_CALLEE, OPSZ_4));
# endif
} else {
PRE(ilist, instr, XINST_CREATE_store(dcontext,
opnd_create_dcontext_field(dcontext, WHEREAMI_OFFSET),
OPND_CREATE_INT32(WHERE_CLEAN_CALLEE)));
}
}
#endif
/* If we need an indirect call, we use r11 as the last of the scratch regs.
* We document this to clients using dr_insert_call_ex() or DR_CLEANCALL_INDIRECT.
*/
direct = insert_reachable_cti(dcontext, ilist, instr, encode_pc, (byte *)callee,
false/*call*/, TEST(META_CALL_RETURNS, flags),
false/*!precise*/, DR_REG_R11, NULL);
if (stack_for_params > 0) {
/* XXX PR 245936: let user decide whether to clean up?
* i.e., support calling a stdcall routine?
*/
PRE(ilist, instr, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT32(stack_for_params)));
}
#ifdef CLIENT_INTERFACE
if (TEST(META_CALL_CLEAN, flags) && DYNAMO_OPTION(profile_pcs)) {
uint whereami;
if (TEST(META_CALL_RETURNS_TO_NATIVE, flags))
whereami = (uint) WHERE_APP;
else
whereami = (uint) WHERE_FCACHE;
if (SCRATCH_ALWAYS_TLS()) {
/* SCRATCH_REG0 is dead here: restore of the app stack will clobber xax */
insert_get_mcontext_base(dcontext, ilist, instr, SCRATCH_REG0);
# ifdef AARCH64
/* TLS_REG1_SLOT is not safe since it may be used by clients.
* We save it to dcontext.mcontext.x0.
*/
PRE(ilist, instr,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0),
opnd_create_reg(SCRATCH_REG1)));
instrlist_insert_mov_immed_ptrsz(dcontext, (ptr_int_t)whereami,
opnd_create_reg(SCRATCH_REG1),
ilist, instr, NULL, NULL);
PRE(ilist, instr,
instr_create_save_to_dc_via_reg(dcontext, SCRATCH_REG0, SCRATCH_REG1,
WHEREAMI_OFFSET));
/* Restore scratch_reg from dcontext.mcontext.x0. */
PRE(ilist, instr,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, 0)));
# else
PRE(ilist, instr,
instr_create_save_immed_to_dc_via_reg(dcontext, SCRATCH_REG0,
WHEREAMI_OFFSET, whereami, OPSZ_4));
# endif
} else {
PRE(ilist, instr, XINST_CREATE_store(dcontext,
opnd_create_dcontext_field(dcontext, WHEREAMI_OFFSET),
OPND_CREATE_INT32(whereami)));
}
}
#endif
/* mark it all meta */
if (in == NULL)
in = instrlist_first(ilist);
else
in = instr_get_next(in);
while (in != instr) {
instr_set_meta(in);
in = instr_get_next(in);
}
return direct;
}
/*###########################################################################
*###########################################################################
*
* M A N G L I N G R O U T I N E S
*/
void
insert_mov_immed_ptrsz(dcontext_t *dcontext, ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
insert_mov_immed_arch(dcontext, NULL, NULL, val, dst,
ilist, instr, first, last);
}
void
insert_mov_instr_addr(dcontext_t *dcontext, instr_t *src, byte *encode_estimate,
opnd_t dst, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
insert_mov_immed_arch(dcontext, src, encode_estimate, 0, dst,
ilist, instr, first, last);
}
void
insert_push_immed_ptrsz(dcontext_t *dcontext, ptr_int_t val,
instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
insert_push_immed_arch(dcontext, NULL, NULL, val,
ilist, instr, first, last);
}
void
insert_push_instr_addr(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
insert_push_immed_arch(dcontext, src_inst, encode_estimate, 0,
ilist, instr, first, last);
}
app_pc
get_app_instr_xl8(instr_t *instr)
{
/* assumption: target's translation or raw bits are set properly */
app_pc xl8 = instr_get_translation(instr);
if (xl8 == NULL && instr_raw_bits_valid(instr))
xl8 = instr_get_raw_bits(instr);
return xl8;
}
ptr_uint_t
get_call_return_address(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
ptr_uint_t retaddr, curaddr;
#ifdef CLIENT_INTERFACE
/* i#620: provide API to set fall-through and retaddr targets at end of bb */
if (instr_is_call(instr) && instrlist_get_return_target(ilist) != NULL) {
retaddr = (ptr_uint_t)instrlist_get_return_target(ilist);
LOG(THREAD, LOG_INTERP, 3, "set return target "PFX" by client\n", retaddr);
return retaddr;
}
#endif
/* For CI builds, use the translation field so we can handle cases
* where the client has changed the target and invalidated the raw
* bits. We'll make sure the translation is always set for direct
* calls.
*
* If a client changes an instr, or our own mangle_rel_addr() does,
* the raw bits won't be valid but the translation should be.
*/
curaddr = (ptr_uint_t) get_app_instr_xl8(instr);
ASSERT(curaddr != 0);
/* we use the next app instruction as return address as the client
* or DR may change the instruction and so its length.
*/
if (instr_raw_bits_valid(instr) &&
instr_get_translation(instr) == instr_get_raw_bits(instr)) {
/* optimization, if nothing changes, use instr->length to avoid
* calling decode_next_pc.
*/
retaddr = curaddr + instr->length;
} else {
retaddr = (ptr_uint_t) decode_next_pc(dcontext, (byte *)curaddr);
}
return retaddr;
}
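/* Illustrative example (the addresses and instruction size are assumptions, not
 * taken from the original source): for an app-level 5-byte "call foo" whose
 * translation is 0x401000, get_call_return_address() returns 0x401005 -- the next
 * app instruction -- via the instr->length fast path when the raw bits are
 * unchanged, or via decode_next_pc() when a client or our own mangling has
 * re-encoded the call.
 */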
#ifdef UNIX
/* find the system call number in instrlist for an inlined system call
 * by simply walking the ilist backward and finding "mov immed => %eax"
* without checking cti or expanding instr
*/
static int
ilist_find_sysnum(instrlist_t *ilist, instr_t *instr)
{
for (; instr != NULL; instr = instr_get_prev(instr)) {
ptr_int_t val;
if (instr_is_app(instr) &&
instr_is_mov_constant(instr, &val) &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
reg_to_pointer_sized(opnd_get_reg(instr_get_dst(instr, 0))) ==
reg_to_pointer_sized(DR_REG_SYSNUM))
return (int) val;
}
ASSERT_NOT_REACHED();
return -1;
}
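/* Illustrative example (the concrete instruction sequence is an assumption, not
 * from the original source): for an inlined Linux x86-64 pair
 *     mov $231, %eax      <- 231 == SYS_exit_group
 *     syscall
 * the backward walk stops at the "mov immed => %eax" and returns 231.
 */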
#endif
static void
mangle_syscall(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
instr_t *instr, instr_t *next_instr)
{
#ifdef UNIX
if (get_syscall_method() != SYSCALL_METHOD_INT &&
get_syscall_method() != SYSCALL_METHOD_SYSCALL &&
get_syscall_method() != SYSCALL_METHOD_SYSENTER &&
get_syscall_method() != SYSCALL_METHOD_SVC) {
/* don't know convention on return address from kernel mode! */
SYSLOG_INTERNAL_ERROR("unsupported system call method");
LOG(THREAD, LOG_INTERP, 1, "don't know convention for this syscall method\n");
CLIENT_ASSERT(false, "Unsupported system call method detected. Please "
"reboot with the nosep kernel option if this is a 32-bit "
"2.5 or 2.6 version Linux kernel.");
}
/* cannot use dynamo stack in code cache, so we cannot insert a
* call -- instead we have interp end bbs at interrupts unless
* we can identify them as ignorable system calls. Otherwise,
* we just remove the instruction and jump back to dynamo to
* handle it.
*/
if (TESTANY(INSTR_NI_SYSCALL_ALL, instr->flags)) {
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return;
}
/* signal barrier: need to be able to exit fragment immediately
* prior to syscall, so we set up an exit cti with a jmp right beforehand
* that by default hops over the exit cti.
* when we want to exit right before the syscall, we call the
* mangle_syscall_code() routine below.
*/
instr_t *skip_exit = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, XINST_CREATE_jump_short(dcontext, opnd_create_instr(skip_exit)));
/* assumption: raw bits of instr == app pc */
ASSERT(instr_get_raw_bits(instr) != NULL);
/* this should NOT be a meta-instr so we don't use PRE */
/* note that it's ok if this gets linked: we unlink all outgoing exits in
* addition to changing the skip_exit jmp upon receiving a signal
*/
instrlist_preinsert(ilist, instr, XINST_CREATE_jump
(dcontext, opnd_create_pc(instr_get_raw_bits(instr))));
PRE(ilist, instr, skip_exit);
if (does_syscall_ret_to_callsite() &&
sysnum_is_not_restartable(ilist_find_sysnum(ilist, instr))) {
/* i#1216: we insert a nop instr right after inlined non-auto-restart
* syscall to make it a safe point for suspending.
         * XXX-i#1216-c#2: we still need to handle auto-restart syscalls
*/
instr_t *nop = XINST_CREATE_nop(dcontext);
/* We make a fake app nop instr for easy handling in recreate_app_state.
* XXX: it is cleaner to mark our-mangling and handle it, but it seems
* ok to use a fake app nop instr, since the client won't see it.
*/
INSTR_XL8(nop, (instr_get_translation(instr) +
instr_length(dcontext, instr)));
instr_set_app(instr);
instrlist_postinsert(ilist, instr, nop);
}
#endif /* UNIX */
mangle_syscall_arch(dcontext, ilist, flags, instr, next_instr);
}
#ifdef UNIX
/* If skip is false:
* changes the jmp right before the next syscall (after pc) to target the
* exit cti immediately following it;
* If skip is true:
* changes back to the default, where skip hops over the exit cti,
* which is assumed to be located at pc.
*/
bool
mangle_syscall_code(dcontext_t *dcontext, fragment_t *f, byte *pc, bool skip)
{
byte *stop_pc = fragment_body_end_pc(dcontext, f);
byte *target, *prev_pc, *cti_pc = NULL, *skip_pc = NULL;
instr_t instr;
DEBUG_DECLARE(instr_t cti;)
instr_init(dcontext, &instr);
DODEBUG({ instr_init(dcontext, &cti); });
LOG(THREAD, LOG_SYSCALLS, 3,
"mangle_syscall_code: pc="PFX", skip=%d\n", pc, skip);
do {
instr_reset(dcontext, &instr);
prev_pc = pc;
pc = decode(dcontext, pc, &instr);
ASSERT(pc != NULL); /* our own code! */
if (instr_get_opcode(&instr) == OP_jmp_short
/* For A32 it's not OP_b_short */
IF_ARM(|| (instr_get_opcode(&instr) == OP_jmp &&
opnd_get_pc(instr_get_target(&instr)) == pc + ARM_INSTR_SIZE)))
skip_pc = prev_pc;
else if (instr_get_opcode(&instr) == OP_jmp)
cti_pc = prev_pc;
if (pc >= stop_pc) {
LOG(THREAD, LOG_SYSCALLS, 3, "\tno syscalls found\n");
instr_free(dcontext, &instr);
return false;
}
} while (!instr_is_syscall(&instr));
if (skip_pc == NULL) {
/* signal happened after skip jmp: nothing we can do here
*
* FIXME PR 213040: we should tell caller difference between
* "no syscalls" and "too-close syscall" and have it take
* other actions to bound signal delay
*/
instr_free(dcontext, &instr);
return false;
}
ASSERT(skip_pc != NULL && cti_pc != NULL);
/* jmps are right before syscall, but there can be nops to pad exit cti on x86 */
ASSERT(cti_pc == prev_pc - JMP_LONG_LENGTH);
ASSERT(skip_pc < cti_pc);
ASSERT(skip_pc == cti_pc - JMP_SHORT_LENGTH
IF_X86(|| *(cti_pc - JMP_SHORT_LENGTH) == RAW_OPCODE_nop));
instr_reset(dcontext, &instr);
pc = decode(dcontext, skip_pc, &instr);
ASSERT(pc != NULL); /* our own code! */
ASSERT(instr_get_opcode(&instr) == OP_jmp_short
/* For A32 it's not OP_b_short */
IF_ARM(|| (instr_get_opcode(&instr) == OP_jmp &&
opnd_get_pc(instr_get_target(&instr)) == pc + ARM_INSTR_SIZE)));
ASSERT(pc <= cti_pc); /* could be nops */
DOCHECK(1, {
pc = decode(dcontext, cti_pc, &cti);
ASSERT(pc != NULL); /* our own code! */
ASSERT(instr_get_opcode(&cti) == OP_jmp);
ASSERT(pc == prev_pc);
instr_reset(dcontext, &cti);
});
if (skip) {
/* target is syscall itself */
target = prev_pc;
} else {
/* target is exit cti */
target = cti_pc;
}
/* FIXME : this should work out to just a 1 byte write, but let's make
* it more clear that this is atomic! */
if (opnd_get_pc(instr_get_target(&instr)) != target) {
byte *nxt_pc;
LOG(THREAD, LOG_SYSCALLS, 3,
"\tmodifying target of syscall jmp to "PFX"\n", target);
instr_set_target(&instr, opnd_create_pc(target));
nxt_pc = instr_encode(dcontext, &instr, skip_pc);
ASSERT(nxt_pc != NULL && nxt_pc == cti_pc);
machine_cache_sync(skip_pc, nxt_pc, true);
} else {
LOG(THREAD, LOG_SYSCALLS, 3,
"\ttarget of syscall jmp is already "PFX"\n", target);
}
instr_free(dcontext, &instr);
return true;
}
#endif /* UNIX */
/* TOP-LEVEL MANGLE
* This routine is responsible for mangling a fragment into the form
* we'd like prior to placing it in the code cache
* If mangle_calls is false, ignores calls
* If record_translation is true, records translation target for each
* inserted instr -- but this slows down encoding in current implementation
*/
void
mangle(dcontext_t *dcontext, instrlist_t *ilist, uint *flags INOUT,
bool mangle_calls, bool record_translation)
{
instr_t *instr, *next_instr;
#ifdef WINDOWS
bool ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) &&
DYNAMO_OPTION(ignore_syscalls_follow_sysenter) &&
(get_syscall_method() == SYSCALL_METHOD_SYSENTER) &&
TEST(FRAG_HAS_SYSCALL, *flags);
#endif
/* Walk through instr list:
* -- convert exit branches to use near_rel form;
* -- convert direct calls into 'push %eip', aka return address;
* -- convert returns into 'pop %xcx (; add $imm, %xsp)';
* -- convert indirect branches into 'save %xcx; lea EA, %xcx';
* -- convert indirect calls as a combination of direct call and
* indirect branch conversion;
* -- ifdef STEAL_REGISTER, steal edi for our own use.
* -- ifdef UNIX, mangle seg ref and mov_seg
*/
KSTART(mangling);
instrlist_set_our_mangling(ilist, true); /* PR 267260 */
#ifdef ARM
if (INTERNAL_OPTION(store_last_pc)) {
/* This is a simple debugging feature. There's a chance that some
* mangling clobbers the r3 slot but it's slim, and it's much
* simpler to put this at the top than try to put it right before
* the exit cti(s).
*/
PRE(ilist, instrlist_first(ilist),
instr_create_save_to_tls(dcontext, DR_REG_PC, TLS_REG3_SLOT));
}
#endif
for (instr = instrlist_first(ilist);
instr != NULL;
instr = next_instr) {
/* don't mangle anything that mangle inserts! */
next_instr = instr_get_next(instr);
if (!instr_opcode_valid(instr))
continue;
#ifdef ANNOTATIONS
if (is_annotation_return_placeholder(instr)) {
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
continue;
}
#endif
if (record_translation) {
/* make sure inserted instrs translate to the original instr */
app_pc xl8 = get_app_instr_xl8(instr);
instrlist_set_translation_target(ilist, xl8);
}
#ifdef X86_64
if (DYNAMO_OPTION(x86_to_x64) &&
IF_WINDOWS_ELSE(is_wow64_process(NT_CURRENT_PROCESS), false) &&
instr_get_x86_mode(instr))
translate_x86_to_x64(dcontext, ilist, &instr);
#endif
#if defined(UNIX) && defined(X86)
if (INTERNAL_OPTION(mangle_app_seg) && instr_is_app(instr)) {
/* The instr might be changed by client, and we cannot rely on
* PREFIX_SEG_FS/GS. So we simply call mangle_seg_ref on every
* instruction and mangle it if necessary.
*/
mangle_seg_ref(dcontext, ilist, instr, next_instr);
if (instr_get_opcode(instr) == OP_mov_seg)
mangle_mov_seg(dcontext, ilist, instr, next_instr);
}
#endif
#ifdef X86
if (instr_saves_float_pc(instr) && instr_is_app(instr)) {
mangle_float_pc(dcontext, ilist, instr, next_instr, flags);
}
#endif
#ifdef AARCH64
if (instr_is_icache_op(instr) && instr_is_app(instr)) {
next_instr = mangle_icache_op(dcontext, ilist, instr, next_instr,
get_app_instr_xl8(next_instr));
continue;
}
#endif
#if defined(X64) || defined(ARM)
/* i#393: mangle_rel_addr might destroy the instr if it is a LEA,
* which makes instr point to freed memory.
         * In such a case, control should skip the later checks on the instr
* for exit_cti and syscall.
* skip the rest of the loop if instr is destroyed.
*/
if (instr_has_rel_addr_reference(instr)
/* XXX i#1834: it should be up to the app to re-relativize, yet on amd64
* our own samples are relying on DR re-relativizing (and we just haven't
* run big enough apps to hit reachability problems) so for now we continue
* mangling meta instrs for x86 builds.
*/
IF_ARM(&& instr_is_app(instr))) {
instr_t *res = mangle_rel_addr(dcontext, ilist, instr, next_instr);
/* Either returns NULL == destroyed "instr", or a new next_instr */
if (res == NULL)
continue;
else
next_instr = res;
}
#endif /* X64 || ARM */
#ifdef AARCHXX
if (!instr_is_meta(instr) && instr_reads_thread_register(instr)) {
next_instr = mangle_reads_thread_register(dcontext, ilist,
instr, next_instr);
continue;
}
#endif /* ARM || AARCH64 */
#ifdef AARCH64
if (!instr_is_meta(instr) && instr_writes_thread_register(instr)) {
next_instr = mangle_writes_thread_register(dcontext, ilist,
instr, next_instr);
continue;
}
if (!instr_is_meta(instr) && instr_uses_reg(instr, dr_reg_stolen))
next_instr = mangle_special_registers(dcontext, ilist, instr, next_instr);
#endif /* AARCH64 */
#ifdef ARM
        /* Our stolen reg model is to expose it to the client. We assume that any
* meta instrs using it are using it as TLS. Ditto w/ use of PC.
*/
if (!instr_is_meta(instr) &&
(instr_uses_reg(instr, DR_REG_PC) || instr_uses_reg(instr, dr_reg_stolen)))
next_instr = mangle_special_registers(dcontext, ilist, instr, next_instr);
#endif /* ARM */
if (instr_is_exit_cti(instr)) {
#ifdef X86
mangle_exit_cti_prefixes(dcontext, instr);
#endif
/* to avoid reachability problems we convert all
* 8-bit-offset jumps that exit the fragment to 32-bit.
* Note that data16 jmps are implicitly converted via the
* absolute target and loss of prefix info (xref PR 225937).
*/
if (instr_is_cti_short(instr)) {
/* convert short jumps */
convert_to_near_rel(dcontext, instr);
}
}
#ifdef ANNOTATIONS
if (is_annotation_label(instr)) {
mangle_annotation_helper(dcontext, instr, ilist);
continue;
}
#endif
/* PR 240258: wow64 call* gateway is considered is_syscall */
if (instr_is_syscall(instr)) {
#ifdef WINDOWS
/* For XP & 2003, which use sysenter, we process the syscall after all
* mangling is completed, since we need to insert a reference to the
* post-sysenter instruction. If that instruction is a 'ret', which
* we've seen on both os's at multiple patch levels, we'd have a
* dangling reference since it's deleted in mangle_return(). To avoid
* that case, we defer syscall processing until mangling is completed.
*/
if (!ignorable_sysenter)
#endif
mangle_syscall(dcontext, ilist, *flags, instr, next_instr);
continue;
}
else if (instr_is_interrupt(instr)) { /* non-syscall interrupt */
mangle_interrupt(dcontext, ilist, instr, next_instr);
continue;
}
#ifdef X86
/*
         * i#2144: We look for single-step exception generation.
*/
else if (instr_can_set_single_step(instr) &&
instr_get_opcode(instr) != OP_iret) {
/* iret is handled in mangle_return. */
mangle_possible_single_step(dcontext, ilist, instr);
continue;
}
else if (dcontext->single_step_addr != NULL &&
dcontext->single_step_addr == instr->translation) {
mangle_single_step(dcontext, ilist, *flags, instr);
/* Resets to generate single step exception only once. */
dcontext->single_step_addr = NULL;
}
#endif
#ifdef FOOL_CPUID
else if (instr_get_opcode(instr) == OP_cpuid) {
mangle_cpuid(dcontext, ilist, instr, next_instr);
continue;
}
#endif
if (!instr_is_cti(instr) || instr_is_meta(instr)) {
#ifdef STEAL_REGISTER
steal_reg(dcontext, instr, ilist);
#endif
#ifdef CLIENT_INTERFACE
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags) && instr_is_label(instr)) {
/* move the value to the note field (which the client cannot
* possibly use at this point) so we don't have to search for
* this label when we hit the ret instr
*/
dr_instr_label_data_t *data = instr_get_label_data_area(instr);
instr_t *tmp;
instr_t *ret = (instr_t *) data->data[0];
CLIENT_ASSERT(ret != NULL,
"dr_clobber_retaddr_after_read()'s label is corrupted");
/* avoid use-after-free if client removed the ret by ensuring
* this instr_t pointer does exist.
* note that we don't want to go searching based just on a flag
* as we want tight coupling w/ a pointer as a general way
* to store per-instr data outside of the instr itself.
*/
for (tmp = instr_get_next(instr); tmp != NULL;
tmp = instr_get_next(tmp)) {
if (tmp == ret) {
tmp->note = (void *) data->data[1]; /* the value to use */
break;
}
}
}
#endif
continue;
}
#ifdef STEAL_REGISTER
if (ilist->flags) {
restore_state(dcontext, instr, ilist); /* end of edi calculation */
}
#endif
if (instr_is_call_direct(instr)) {
/* mangle_direct_call may inline a call and remove next_instr, so
* it passes us the updated next instr */
next_instr = mangle_direct_call(dcontext, ilist, instr, next_instr,
mangle_calls, *flags);
} else if (instr_is_call_indirect(instr)) {
next_instr = mangle_indirect_call(dcontext, ilist, instr, next_instr,
mangle_calls, *flags);
} else if (instr_is_return(instr)) {
mangle_return(dcontext, ilist, instr, next_instr, *flags);
} else if (instr_is_mbr(instr)) {
next_instr = mangle_indirect_jump(dcontext, ilist, instr, next_instr, *flags);
#ifdef X86
} else if (instr_get_opcode(instr) == OP_jmp_far) {
mangle_far_direct_jump(dcontext, ilist, instr, next_instr, *flags);
#endif
}
/* else nothing to do, e.g. direct branches */
}
#ifdef WINDOWS
/* Do XP & 2003 ignore-syscalls processing now. */
if (ignorable_sysenter) {
/* Check for any syscalls and process them. */
for (instr = instrlist_first(ilist);
instr != NULL;
instr = next_instr) {
next_instr = instr_get_next(instr);
if (instr_opcode_valid(instr) && instr_is_syscall(instr))
mangle_syscall(dcontext, ilist, *flags, instr, next_instr);
}
}
#endif
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
#if defined(X86) && defined(X64)
if (!X64_CACHE_MODE_DC(dcontext)) {
instr_t *in;
for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
if (instr_is_our_mangling(in)) {
instr_set_x86_mode(in, true/*x86*/);
instr_shrink_to_32_bits(in);
}
}
}
#endif
/* The following assertion should be guaranteed by fact that all
* blocks end in some kind of branch, and the code above restores
* the register state on a branch. */
ASSERT(ilist->flags == 0);
KSTOP(mangling);
}
/***************************************************************************
* SYSCALL
*/
#ifdef CLIENT_INTERFACE
static bool
cti_is_normal_elision(instr_t *instr)
{
instr_t *next;
opnd_t tgt;
app_pc next_pc;
if (instr == NULL || instr_is_meta(instr))
return false;
if (!instr_is_ubr(instr) && !instr_is_call_direct(instr))
return false;
next = instr_get_next(instr);
if (next == NULL || instr_is_meta(next))
return false;
tgt = instr_get_target(instr);
next_pc = get_app_instr_xl8(next);
if (opnd_is_pc(tgt) && next_pc != NULL && opnd_get_pc(tgt) == next_pc)
return true;
return false;
}
#endif
/* Tries to statically find the syscall number for the
* syscall instruction instr.
* Returns -1 upon failure.
* Note that on MacOS, 32-bit Mach syscalls are encoded using negative numbers
* (although -1 is invalid), so be sure to test for -1 and not just <0 as a failure
* code.
*/
int
find_syscall_num(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
int syscall = -1;
ptr_int_t value;
instr_t *prev = instr_get_prev(instr);
/* Allow either eax or rax for x86_64 */
reg_id_t sysreg = reg_to_pointer_sized(DR_REG_SYSNUM);
if (prev != NULL) {
prev = instr_get_prev_expanded(dcontext, ilist, instr);
/* walk backwards looking for "mov imm->xax"
* may be other instrs placing operands into registers
* for the syscall in between
*/
while (prev != NULL &&
!instr_is_syscall(prev) && !instr_is_interrupt(prev) &&
!instr_writes_to_reg(prev, sysreg, DR_QUERY_INCLUDE_ALL)) {
#ifdef CLIENT_INTERFACE
/* if client added cti in between, bail and assume non-ignorable */
if (instr_is_cti(prev) &&
!(cti_is_normal_elision(prev)
IF_WINDOWS(|| instr_is_call_sysenter_pattern
(prev, instr_get_next(prev), instr))))
return -1;
#endif
prev = instr_get_prev_expanded(dcontext, ilist, prev);
}
if (prev != NULL && !instr_is_predicated(prev) &&
instr_is_mov_constant(prev, &value) &&
opnd_is_reg(instr_get_dst(prev, 0)) &&
reg_to_pointer_sized(opnd_get_reg(instr_get_dst(prev, 0))) == sysreg) {
#ifdef CLIENT_INTERFACE
instr_t *walk, *tgt;
#endif
IF_X64(ASSERT_TRUNCATE(int, int, value));
syscall = (int) value;
#ifdef ARM
if (opnd_get_size(instr_get_dst(prev, 0)) != OPSZ_PTR) {
/* sub-reg write: special-case movw,movt, else bail */
if (instr_get_opcode(prev) == OP_movt) {
ptr_int_t val2;
prev = instr_get_prev_expanded(dcontext, ilist, prev);
if (prev != NULL && instr_is_mov_constant(prev, &val2)) {
syscall = (int) (value << 16) | (val2 & 0xffff);
} else
return -1;
} else
return -1;
}
#endif
#ifdef CLIENT_INTERFACE
/* if client added cti target in between, bail and assume non-ignorable */
for (walk = instrlist_first_expanded(dcontext, ilist);
walk != NULL;
walk = instr_get_next_expanded(dcontext, ilist, walk)) {
if (instr_is_cti(walk) && opnd_is_instr(instr_get_target(walk))) {
for (tgt = opnd_get_instr(instr_get_target(walk));
tgt != NULL;
tgt = instr_get_next_expanded(dcontext, ilist, tgt)) {
if (tgt == prev)
break;
if (tgt == instr)
return -1;
}
}
}
#endif
}
}
IF_X64(ASSERT_TRUNCATE(int, int, syscall));
return (int) syscall;
}
/* END OF CONTROL-FLOW MANGLING ROUTINES
*###########################################################################
*###########################################################################
*/
void
clean_call_info_init(clean_call_info_t *cci, void *callee,
bool save_fpstate, uint num_args)
{
memset(cci, 0, sizeof(*cci));
cci->callee = callee;
cci->num_args = num_args;
cci->save_fpstate = save_fpstate;
cci->save_all_regs = true;
cci->should_align = true;
cci->callee_info = &default_callee_info;
}
void
mangle_init(void)
{
mangle_arch_init();
/* create a default func_info for:
* 1. clean call callee that cannot be analyzed.
* 2. variable clean_callees will not be updated during the execution
* and can be set write protected.
*/
#ifdef CLIENT_INTERFACE
clean_call_opt_init();
clean_call_info_init(&default_clean_call_info, NULL, false, 0);
#endif
}
void
mangle_exit(void)
{
#ifdef CLIENT_INTERFACE
clean_call_opt_exit();
#endif
}
| 1 | 11,855 | This is used only for out-of-line -- so yes this seems right to do for x64. Inlined is aligned separately at the end of prepare_for_clean_call(). There the ifdef is x86_64 or MACOS -- no ARM, why not? Also, please add || MACOS here to match the inlined. | DynamoRIO-dynamorio | c |
@@ -271,7 +271,8 @@ final class MySQLSpanStore implements SpanStore {
.selectDistinct(ZIPKIN_SPANS.NAME)
.from(ZIPKIN_SPANS)
.join(ZIPKIN_ANNOTATIONS)
- .on(ZIPKIN_SPANS.TRACE_ID.eq(ZIPKIN_ANNOTATIONS.TRACE_ID))
+ .on(ZIPKIN_SPANS.TRACE_ID_HIGH.eq(ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH))
+ .and(ZIPKIN_SPANS.TRACE_ID.eq(ZIPKIN_ANNOTATIONS.TRACE_ID))
.and(ZIPKIN_SPANS.ID.eq(ZIPKIN_ANNOTATIONS.SPAN_ID))
.where(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(serviceName))
.orderBy(ZIPKIN_SPANS.NAME) | 1 | /**
* Copyright 2015-2017 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.mysql;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.jooq.Condition;
import org.jooq.Cursor;
import org.jooq.DSLContext;
import org.jooq.Record;
import org.jooq.Row3;
import org.jooq.SelectConditionStep;
import org.jooq.SelectField;
import org.jooq.SelectOffsetStep;
import org.jooq.TableField;
import org.jooq.TableOnConditionStep;
import zipkin.Annotation;
import zipkin.BinaryAnnotation;
import zipkin.BinaryAnnotation.Type;
import zipkin.DependencyLink;
import zipkin.Endpoint;
import zipkin.internal.DependencyLinker;
import zipkin.internal.GroupByTraceId;
import zipkin.internal.Nullable;
import zipkin.internal.Pair;
import zipkin.storage.QueryRequest;
import zipkin.storage.SpanStore;
import zipkin.storage.mysql.internal.generated.tables.ZipkinAnnotations;
import zipkin2.Span;
import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.groupingBy;
import static org.jooq.impl.DSL.row;
import static zipkin.BinaryAnnotation.Type.STRING;
import static zipkin.Constants.CLIENT_ADDR;
import static zipkin.Constants.CLIENT_SEND;
import static zipkin.Constants.ERROR;
import static zipkin.Constants.SERVER_ADDR;
import static zipkin.Constants.SERVER_RECV;
import static zipkin.internal.Util.UTF_8;
import static zipkin.internal.Util.getDays;
import static zipkin.storage.mysql.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS;
import static zipkin.storage.mysql.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES;
import static zipkin.storage.mysql.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS;
final class MySQLSpanStore implements SpanStore {
private final DataSource datasource;
private final DSLContexts context;
private final Schema schema;
private final boolean strictTraceId;
MySQLSpanStore(DataSource datasource, DSLContexts context, Schema schema, boolean strictTraceId) {
this.datasource = datasource;
this.context = context;
this.schema = schema;
this.strictTraceId = strictTraceId;
}
private Endpoint endpoint(Record a) {
String serviceName = a.getValue(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME);
if (serviceName == null) return null;
return Endpoint.builder()
.serviceName(serviceName)
.port(a.getValue(ZIPKIN_ANNOTATIONS.ENDPOINT_PORT))
.ipv4(a.getValue(ZIPKIN_ANNOTATIONS.ENDPOINT_IPV4))
.ipv6(maybeGet(a, ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6, null)).build();
}
SelectOffsetStep<? extends Record> toTraceIdQuery(DSLContext context, QueryRequest request) {
long endTs = (request.endTs > 0 && request.endTs != Long.MAX_VALUE) ? request.endTs * 1000
: System.currentTimeMillis() * 1000;
TableOnConditionStep<?> table = ZIPKIN_SPANS.join(ZIPKIN_ANNOTATIONS)
.on(schema.joinCondition(ZIPKIN_ANNOTATIONS));
int i = 0;
for (String key : request.annotations) {
ZipkinAnnotations aTable = ZIPKIN_ANNOTATIONS.as("a" + i++);
table = maybeOnService(table.join(aTable)
.on(schema.joinCondition(aTable))
.and(aTable.A_KEY.eq(key)), aTable, request.serviceName);
}
for (Map.Entry<String, String> kv : request.binaryAnnotations.entrySet()) {
ZipkinAnnotations aTable = ZIPKIN_ANNOTATIONS.as("a" + i++);
table = maybeOnService(table.join(aTable)
.on(schema.joinCondition(aTable))
.and(aTable.A_TYPE.eq(STRING.value))
.and(aTable.A_KEY.eq(kv.getKey()))
.and(aTable.A_VALUE.eq(kv.getValue().getBytes(UTF_8))), aTable, request.serviceName);
}
List<SelectField<?>> distinctFields = new ArrayList<>(schema.spanIdFields);
distinctFields.add(ZIPKIN_SPANS.START_TS.max());
SelectConditionStep<Record> dsl = context.selectDistinct(distinctFields)
.from(table)
.where(ZIPKIN_SPANS.START_TS.between(endTs - request.lookback * 1000, endTs));
if (request.serviceName != null) {
dsl.and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(request.serviceName));
}
if (request.spanName != null) {
dsl.and(ZIPKIN_SPANS.NAME.eq(request.spanName));
}
if (request.minDuration != null && request.maxDuration != null) {
dsl.and(ZIPKIN_SPANS.DURATION.between(request.minDuration, request.maxDuration));
} else if (request.minDuration != null) {
dsl.and(ZIPKIN_SPANS.DURATION.greaterOrEqual(request.minDuration));
}
return dsl
.groupBy(schema.spanIdFields)
.orderBy(ZIPKIN_SPANS.START_TS.max().desc()).limit(request.limit);
}
static TableOnConditionStep<?> maybeOnService(TableOnConditionStep<Record> table,
ZipkinAnnotations aTable, String serviceName) {
if (serviceName == null) return table;
return table.and(aTable.ENDPOINT_SERVICE_NAME.eq(serviceName));
}
List<List<zipkin.Span>> getTraces(@Nullable QueryRequest request, @Nullable Long traceIdHigh,
@Nullable Long traceIdLow, boolean raw) {
if (traceIdHigh != null && !strictTraceId) traceIdHigh = null;
final Map<Pair<Long>, List<zipkin.Span>> spansWithoutAnnotations;
final Map<Row3<Long, Long, Long>, List<Record>> dbAnnotations;
try (Connection conn = datasource.getConnection()) {
Condition traceIdCondition = request != null
? schema.spanTraceIdCondition(toTraceIdQuery(context.get(conn), request))
: schema.spanTraceIdCondition(traceIdHigh, traceIdLow);
spansWithoutAnnotations = context.get(conn)
.select(schema.spanFields)
.from(ZIPKIN_SPANS).where(traceIdCondition)
.stream()
.map(r -> zipkin.Span.builder()
.traceIdHigh(maybeGet(r, ZIPKIN_SPANS.TRACE_ID_HIGH, 0L))
.traceId(r.getValue(ZIPKIN_SPANS.TRACE_ID))
.name(r.getValue(ZIPKIN_SPANS.NAME))
.id(r.getValue(ZIPKIN_SPANS.ID))
.parentId(r.getValue(ZIPKIN_SPANS.PARENT_ID))
.timestamp(r.getValue(ZIPKIN_SPANS.START_TS))
.duration(r.getValue(ZIPKIN_SPANS.DURATION))
.debug(r.getValue(ZIPKIN_SPANS.DEBUG))
.build())
.collect(
groupingBy((zipkin.Span s) -> Pair.create(s.traceIdHigh, s.traceId),
LinkedHashMap::new, Collectors.<zipkin.Span>toList()));
dbAnnotations = context.get(conn)
.select(schema.annotationFields)
.from(ZIPKIN_ANNOTATIONS)
.where(schema.annotationsTraceIdCondition(spansWithoutAnnotations.keySet()))
.orderBy(ZIPKIN_ANNOTATIONS.A_TIMESTAMP.asc(), ZIPKIN_ANNOTATIONS.A_KEY.asc())
.stream()
.collect(groupingBy((Record a) -> row(
maybeGet(a, ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, 0L),
a.getValue(ZIPKIN_ANNOTATIONS.TRACE_ID),
a.getValue(ZIPKIN_ANNOTATIONS.SPAN_ID)
), LinkedHashMap::new,
Collectors.<Record>toList())); // LinkedHashMap preserves order while grouping
} catch (SQLException e) {
throw new RuntimeException("Error querying for " + request + ": " + e.getMessage());
}
List<zipkin.Span> allSpans = new ArrayList<>(spansWithoutAnnotations.size());
for (List<zipkin.Span> spans : spansWithoutAnnotations.values()) {
for (zipkin.Span s : spans) {
zipkin.Span.Builder span = s.toBuilder();
Row3<Long, Long, Long> key = row(s.traceIdHigh, s.traceId, s.id);
if (dbAnnotations.containsKey(key)) {
for (Record a : dbAnnotations.get(key)) {
Endpoint endpoint = endpoint(a);
int type = a.getValue(ZIPKIN_ANNOTATIONS.A_TYPE);
if (type == -1) {
span.addAnnotation(Annotation.create(
a.getValue(ZIPKIN_ANNOTATIONS.A_TIMESTAMP),
a.getValue(ZIPKIN_ANNOTATIONS.A_KEY),
endpoint));
} else {
span.addBinaryAnnotation(BinaryAnnotation.create(
a.getValue(ZIPKIN_ANNOTATIONS.A_KEY),
a.getValue(ZIPKIN_ANNOTATIONS.A_VALUE),
Type.fromValue(type),
endpoint));
}
}
}
allSpans.add(span.build());
}
}
return GroupByTraceId.apply(allSpans, strictTraceId, !raw);
}
static <T> T maybeGet(Record record, TableField<Record, T> field, T defaultValue) {
if (record.fieldsRow().indexOf(field) < 0) {
return defaultValue;
} else {
return record.get(field);
}
}
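  // Illustrative note (the schema state described is an assumption, not from the
  // original source): if the installed schema predates the trace_id_high column,
  // the selected row carries no ZIPKIN_SPANS.TRACE_ID_HIGH field, so
  // maybeGet(r, ZIPKIN_SPANS.TRACE_ID_HIGH, 0L) falls back to 0L rather than
  // reading a field that is not present in the record.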
@Override
public List<List<zipkin.Span>> getTraces(QueryRequest request) {
return getTraces(request, null, null, false);
}
@Override
public List<zipkin.Span> getTrace(long traceId) {
return getTrace(0L, traceId);
}
@Override public List<zipkin.Span> getTrace(long traceIdHigh, long traceIdLow) {
List<List<zipkin.Span>> result = getTraces(null, traceIdHigh, traceIdLow, false);
return result.isEmpty() ? null : result.get(0);
}
@Override
public List<zipkin.Span> getRawTrace(long traceId) {
return getRawTrace(0L, traceId);
}
@Override public List<zipkin.Span> getRawTrace(long traceIdHigh, long traceIdLow) {
List<List<zipkin.Span>> result = getTraces(null, traceIdHigh, traceIdLow, true);
return result.isEmpty() ? null : result.get(0);
}
@Override
public List<String> getServiceNames() {
try (Connection conn = datasource.getConnection()) {
return context.get(conn)
.selectDistinct(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME)
.from(ZIPKIN_ANNOTATIONS)
.where(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.isNotNull()
.and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.ne("")))
.fetch(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME);
} catch (SQLException e) {
throw new RuntimeException("Error querying for " + e + ": " + e.getMessage());
}
}
@Override
public List<String> getSpanNames(String serviceName) {
if (serviceName == null) return emptyList();
serviceName = serviceName.toLowerCase(); // service names are always lowercase!
try (Connection conn = datasource.getConnection()) {
return context.get(conn)
.selectDistinct(ZIPKIN_SPANS.NAME)
.from(ZIPKIN_SPANS)
.join(ZIPKIN_ANNOTATIONS)
.on(ZIPKIN_SPANS.TRACE_ID.eq(ZIPKIN_ANNOTATIONS.TRACE_ID))
.and(ZIPKIN_SPANS.ID.eq(ZIPKIN_ANNOTATIONS.SPAN_ID))
.where(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(serviceName))
.orderBy(ZIPKIN_SPANS.NAME)
.fetch(ZIPKIN_SPANS.NAME);
} catch (SQLException e) {
throw new RuntimeException("Error querying for " + serviceName + ": " + e.getMessage());
}
}
@Override
public List<DependencyLink> getDependencies(long endTs, @Nullable Long lookback) {
try (Connection conn = datasource.getConnection()) {
if (schema.hasPreAggregatedDependencies) {
List<Date> days = getDays(endTs, lookback);
List<DependencyLink> unmerged = context.get(conn)
.select(schema.dependencyLinkFields)
.from(ZIPKIN_DEPENDENCIES)
.where(ZIPKIN_DEPENDENCIES.DAY.in(days))
.fetch((Record l) -> DependencyLink.builder()
.parent(l.get(ZIPKIN_DEPENDENCIES.PARENT))
.child(l.get(ZIPKIN_DEPENDENCIES.CHILD))
.callCount(l.get(ZIPKIN_DEPENDENCIES.CALL_COUNT))
.errorCount(maybeGet(l, ZIPKIN_DEPENDENCIES.ERROR_COUNT, 0L))
.build()
);
return DependencyLinker.merge(unmerged);
} else {
return aggregateDependencies(endTs, lookback, conn);
}
} catch (SQLException e) {
throw new RuntimeException("Error querying dependencies for endTs "
+ endTs + " and lookback " + lookback + ": " + e.getMessage());
}
}
List<DependencyLink> aggregateDependencies(long endTs, @Nullable Long lookback, Connection conn) {
endTs = endTs * 1000;
// Lazy fetching the cursor prevents us from buffering the whole dataset in memory.
Cursor<Record> cursor = context.get(conn)
.selectDistinct(schema.dependencyLinkerFields)
// left joining allows us to keep a mapping of all span ids, not just ones that have
// special annotations. We need all span ids to reconstruct the trace tree. We need
// the whole trace tree so that we can accurately skip local spans.
.from(ZIPKIN_SPANS.leftJoin(ZIPKIN_ANNOTATIONS)
// NOTE: we are intentionally grouping only on the low-bits of trace id. This buys time
// for applications to upgrade to 128-bit instrumentation.
.on(ZIPKIN_SPANS.TRACE_ID.eq(ZIPKIN_ANNOTATIONS.TRACE_ID).and(
ZIPKIN_SPANS.ID.eq(ZIPKIN_ANNOTATIONS.SPAN_ID)))
.and(ZIPKIN_ANNOTATIONS.A_KEY.in(CLIENT_SEND, CLIENT_ADDR, SERVER_RECV, SERVER_ADDR, ERROR)))
.where(lookback == null ?
ZIPKIN_SPANS.START_TS.lessOrEqual(endTs) :
ZIPKIN_SPANS.START_TS.between(endTs - lookback * 1000, endTs))
// Grouping so that later code knows when a span or trace is finished.
.groupBy(schema.dependencyLinkerGroupByFields).fetchLazy();
Iterator<Iterator<Span>> traces =
new DependencyLinkV2SpanIterator.ByTraceId(cursor.iterator(), schema.hasTraceIdHigh);
if (!traces.hasNext()) return Collections.emptyList();
DependencyLinker linker = new DependencyLinker();
while (traces.hasNext()) {
linker.putTrace(traces.next());
}
return linker.link();
}
}
| 1 | 13,160 | guess I'm wondering if this needs to be refactored to use Schema.joinCondition() or similar? | openzipkin-zipkin | java |
@@ -53,6 +53,7 @@ module Beaker
class_option :'xml-time-order', :type => :boolean, :group => 'Beaker run'
class_option :'debug-errors', :type => :boolean, :group => 'Beaker run'
class_option :'exec_manual_tests', :type => :boolean, :group => 'Beaker run'
+ class_option :'test-tag-exclude', :type => :string, :group => 'Beaker run'
# The following are listed as deprecated in beaker --help, but needed now for
# feature parity for beaker 3.x. | 1 | require "thor"
require "fileutils"
require "beaker/subcommands/subcommand_util"
module Beaker
class Subcommand < Thor
SubcommandUtil = Beaker::Subcommands::SubcommandUtil
def initialize(*args)
super
FileUtils.mkdir_p(SubcommandUtil::CONFIG_DIR)
FileUtils.touch(SubcommandUtil::SUBCOMMAND_OPTIONS) unless SubcommandUtil::SUBCOMMAND_OPTIONS.exist?
FileUtils.touch(SubcommandUtil::SUBCOMMAND_STATE) unless SubcommandUtil::SUBCOMMAND_STATE.exist?
@cli = Beaker::CLI.new
end
# Options listed in this group 'Beaker run' are options that can be set on subcommands
# but are not processed by the subcommand itself. They are passed through so that when
# a Beaker::CLI object executes, it can pick up these options. Notably excluded from this
# group are `help` and `version`. Please note that whenever the command_line_parser.rb is
    # updated, this list should be updated as well.
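    # For example (illustrative invocation; the flag values are assumptions, not
    # from the original source):
    #   beaker init --hosts hosts.yaml --log-level debug
    # records --log-level in .beaker/subcommand_options.yaml, so a later
    #   beaker exec pre-suite,tests
    # picks the same log level back up without it being repeated on the command line.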
class_option :'options-file', :aliases => '-o', :type => :string, :group => 'Beaker run'
class_option :helper, :type => :string, :group => 'Beaker run'
class_option :'load-path', :type => :string, :group => 'Beaker run'
class_option :tests, :aliases => '-t', :type => :string, :group => 'Beaker run'
class_option :'pre-suite', :type => :string, :group => 'Beaker run'
class_option :'post-suite', :type => :string, :group => 'Beaker run'
class_option :'pre-cleanup', :type => :string, :group => 'Beaker run'
class_option :'provision', :type => :boolean, :group => 'Beaker run'
class_option :'preserve-hosts', :type => :string, :group => 'Beaker run'
class_option :'root-keys', :type => :boolean, :group => 'Beaker run'
class_option :keyfile, :type => :string, :group => 'Beaker run'
class_option :timeout, :type => :string, :group => 'Beaker run'
class_option :install, :aliases => '-i', :type => :string, :group => 'Beaker run'
class_option :modules, :aliases => '-m', :type => :string, :group => 'Beaker run'
class_option :quiet, :aliases => '-q', :type => :boolean, :group => 'Beaker run'
class_option :color, :type => :boolean, :group => 'Beaker run'
class_option :'color-host-output', :type => :boolean, :group => 'Beaker run'
class_option :'log-level', :type => :string, :group => 'Beaker run'
class_option :'log-prefix', :type => :string, :group => 'Beaker run'
class_option :'dry-run', :type => :boolean, :group => 'Beaker run'
class_option :'fail-mode', :type => :string, :group => 'Beaker run'
class_option :ntp, :type => :boolean, :group => 'Beaker run'
class_option :'repo-proxy', :type => :boolean, :group => 'Beaker run'
class_option :'add-el-extras', :type => :boolean, :group => 'Beaker run'
class_option :'package-proxy', :type => :string, :group => 'Beaker run'
class_option :'validate', :type => :boolean, :group => 'Beaker run'
class_option :'collect-perf-data', :type => :boolean, :group => 'Beaker run'
class_option :'parse-only', :type => :boolean, :group => 'Beaker run'
class_option :tag, :type => :string, :group => 'Beaker run'
class_option :'exclude-tags', :type => :string, :group => 'Beaker run'
class_option :'xml-time-order', :type => :boolean, :group => 'Beaker run'
class_option :'debug-errors', :type => :boolean, :group => 'Beaker run'
class_option :'exec_manual_tests', :type => :boolean, :group => 'Beaker run'
# The following are listed as deprecated in beaker --help, but needed now for
# feature parity for beaker 3.x.
class_option :xml, :type => :boolean, :group => "Beaker run"
class_option :type, :type => :string, :group => "Beaker run"
class_option :debug, :type => :boolean, :group => "Beaker run"
desc "init BEAKER_RUN_OPTIONS", "Initializes the required configuration for Beaker subcommand execution"
long_desc <<-LONGDESC
Initializes the required .beaker configuration folder. This folder contains
a subcommand_options.yaml file that is user-facing; altering this file will
      alter the options used for subcommand execution. Subsequent subcommand execution,
such as `provision`, will result in beaker making modifications to this file
as necessary.
LONGDESC
option :help, :type => :boolean, :hide => true
method_option :hosts, :aliases => '-h', :type => :string, :required => true
def init()
if options[:help]
invoke :help, [], ["init"]
return
end
@cli.parse_options
# delete unnecessary keys for saving the options
options_to_write = @cli.configured_options
# Remove keys we don't want to save
[:timestamp, :logger, :command_line, :beaker_version, :hosts_file].each do |key|
options_to_write.delete(key)
end
options_to_write = SubcommandUtil.sanitize_options_for_save(options_to_write)
@cli.logger.notify 'Writing configured options to disk'
File.open(SubcommandUtil::SUBCOMMAND_OPTIONS, 'w') do |f|
f.write(options_to_write.to_yaml)
end
@cli.logger.notify "Options written to #{SubcommandUtil::SUBCOMMAND_OPTIONS}"
state = YAML::Store.new(SubcommandUtil::SUBCOMMAND_STATE)
state.transaction do
state['provisioned'] = false
end
end
desc "provision", "Provisions the beaker systems under test(SUTs)"
long_desc <<-LONGDESC
Provisions hosts defined in your subcommand_options file. You can pass the --hosts
flag here to override any hosts provided there. Really, you can pass most any beaker
flag here to override.
LONGDESC
option :help, :type => :boolean, :hide => true
def provision()
if options[:help]
invoke :help, [], ["provision"]
return
end
state = YAML::Store.new(SubcommandUtil::SUBCOMMAND_STATE)
if state.transaction { state['provisioned']}
SubcommandUtil.error_with('Provisioned SUTs detected. Please destroy and reprovision.')
end
@cli.parse_options
@cli.provision
# Sanitize the hosts
cleaned_hosts = SubcommandUtil.sanitize_options_for_save(@cli.combined_instance_and_options_hosts)
# Update each host provisioned with a flag indicating that it no longer needs
# provisioning
cleaned_hosts.each do |host, host_hash|
host_hash['provision'] = false
end
# should we only update the options here with the new host? Or update the settings
# with whatever new flags may have been provided with provision?
options_storage = YAML::Store.new(SubcommandUtil::SUBCOMMAND_OPTIONS)
options_storage.transaction do
@cli.logger.notify 'updating HOSTS key in subcommand_options'
options_storage['HOSTS'] = cleaned_hosts
options_storage['hosts_preserved_yaml_file'] = @cli.options[:hosts_preserved_yaml_file]
end
@cli.preserve_hosts_file
state.transaction do
state['provisioned'] = true
end
end
desc 'exec FILE/BEAKER_SUITE', 'execute a directory, file, or beaker suite'
long_desc <<-LONG_DESC
Run a single file, directory, or beaker suite. If supplied a file or directory,
      that resource will be run in the context of the `tests` suite; if supplied a beaker
suite, then just that suite will run. If no resource is supplied, then this command
executes the suites as they are defined in the configuration.
LONG_DESC
option :help, :type => :boolean, :hide => true
def exec(resource=nil)
if options[:help]
invoke :help, [], ["exec"]
return
end
@cli.parse_options
@cli.initialize_network_manager
if !resource
@cli.execute!
return
end
beaker_suites = [:pre_suite, :tests, :post_suite, :pre_cleanup]
if Pathname(resource).exist?
# If we determine the resource is a valid file resource, then we empty
# all the suites and run that file resource in the tests suite. In the
# future, when we have the ability to have custom suites, we should change
# this to run in a custom suite. You know, in the future.
beaker_suites.each do |suite|
@cli.options[suite] = []
end
if Pathname(resource).directory?
@cli.options[:tests] = Dir.glob("#{Pathname(resource).expand_path}/*.rb")
else
@cli.options[:tests] = [Pathname(resource).expand_path.to_s]
end
elsif resource.match(/pre-suite|tests|post-suite|pre-cleanup/)
# The regex match here is loose so that users can supply multiple suites,
# such as `beaker exec pre-suite,tests`.
beaker_suites.each do |suite|
@cli.options[suite] = [] unless resource.gsub(/-/, '_').match(suite.to_s)
end
else
raise ArgumentError, "Unable to parse #{resource} with beaker exec"
end
@cli.execute!
end
desc "destroy", "Destroys the provisioned VMs"
long_desc <<-LONG_DESC
Destroys the currently provisioned VMs
LONG_DESC
option :help, :type => :boolean, :hide => true
def destroy()
if options[:help]
invoke :help, [], ["destroy"]
return
end
state = YAML::Store.new(SubcommandUtil::SUBCOMMAND_STATE)
unless state.transaction { state['provisioned']}
SubcommandUtil.error_with('Please provision an environment')
end
@cli.parse_options
@cli.options[:provision] = false
@cli.initialize_network_manager
@cli.network_manager.cleanup
state.transaction {
state.delete('provisioned')
}
end
end
end
| 1 | 15,755 | Does it make sense to restrict this option to `exec` only? You could add it specific to that subcommand using the `method_option`...method. There's an example of it for hosts in the `init` function. | voxpupuli-beaker | rb |
@@ -143,7 +143,10 @@ namespace OpenTelemetry.Context.Propagation
break;
}
- if (NameValueHeaderValue.TryParse(pair, out NameValueHeaderValue baggageItem))
+ var decodedPair = WebUtility.UrlDecode(pair);
+ var escapedPair = string.Join("=", decodedPair.Split('=').Select(Uri.EscapeDataString));
+
+ if (NameValueHeaderValue.TryParse(escapedPair, out NameValueHeaderValue baggageItem))
{
if (string.IsNullOrEmpty(baggageItem.Name) || string.IsNullOrEmpty(baggageItem.Value))
{ | 1 | // <copyright file="BaggagePropagator.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Context.Propagation
{
/// <summary>
/// A text map propagator for W3C Baggage. See https://w3c.github.io/baggage/.
/// </summary>
public class BaggagePropagator : TextMapPropagator
{
internal const string BaggageHeaderName = "baggage";
private const int MaxBaggageLength = 8192;
private const int MaxBaggageItems = 180;
/// <inheritdoc/>
public override ISet<string> Fields => new HashSet<string> { BaggageHeaderName };
/// <inheritdoc/>
public override PropagationContext Extract<T>(PropagationContext context, T carrier, Func<T, string, IEnumerable<string>> getter)
{
if (context.Baggage != default)
{
// If baggage has already been extracted, perform a noop.
return context;
}
if (carrier == null)
{
OpenTelemetryApiEventSource.Log.FailedToExtractBaggage(nameof(BaggagePropagator), "null carrier");
return context;
}
if (getter == null)
{
OpenTelemetryApiEventSource.Log.FailedToExtractBaggage(nameof(BaggagePropagator), "null getter");
return context;
}
try
{
Dictionary<string, string> baggage = null;
var baggageCollection = getter(carrier, BaggageHeaderName);
if (baggageCollection?.Any() ?? false)
{
TryExtractBaggage(baggageCollection.ToArray(), out baggage);
}
return new PropagationContext(
context.ActivityContext,
baggage == null ? context.Baggage : new Baggage(baggage));
}
catch (Exception ex)
{
OpenTelemetryApiEventSource.Log.BaggageExtractException(nameof(BaggagePropagator), ex);
}
return context;
}
/// <inheritdoc/>
public override void Inject<T>(PropagationContext context, T carrier, Action<T, string, string> setter)
{
if (carrier == null)
{
OpenTelemetryApiEventSource.Log.FailedToInjectBaggage(nameof(BaggagePropagator), "null carrier");
return;
}
if (setter == null)
{
OpenTelemetryApiEventSource.Log.FailedToInjectBaggage(nameof(BaggagePropagator), "null setter");
return;
}
using var e = context.Baggage.GetEnumerator();
if (e.MoveNext() == true)
{
int itemCount = 0;
StringBuilder baggage = new StringBuilder();
do
{
KeyValuePair<string, string> item = e.Current;
if (string.IsNullOrEmpty(item.Value))
{
continue;
}
baggage.Append(WebUtility.UrlEncode(item.Key)).Append('=').Append(WebUtility.UrlEncode(item.Value)).Append(',');
}
while (e.MoveNext() && ++itemCount < MaxBaggageItems && baggage.Length < MaxBaggageLength);
baggage.Remove(baggage.Length - 1, 1);
setter(carrier, BaggageHeaderName, baggage.ToString());
}
}
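        // Illustrative round trip (the entry names and values are assumptions, not
        // from the original source): injecting a Baggage of { "userId" = "alice",
        // "ttl" = "30" } emits the single header value
        //     userId=alice,ttl=30
        // under the "baggage" key, and TryExtractBaggage on that value rebuilds the
        // same two entries, subject to the MaxBaggageItems/MaxBaggageLength caps above.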
internal static bool TryExtractBaggage(string[] baggageCollection, out Dictionary<string, string> baggage)
{
int baggageLength = -1;
bool done = false;
Dictionary<string, string> baggageDictionary = null;
foreach (var item in baggageCollection)
{
if (done)
{
break;
}
if (string.IsNullOrEmpty(item))
{
continue;
}
foreach (var pair in item.Split(','))
{
baggageLength += pair.Length + 1; // pair and comma
if (baggageLength >= MaxBaggageLength || baggageDictionary?.Count >= MaxBaggageItems)
{
done = true;
break;
}
if (NameValueHeaderValue.TryParse(pair, out NameValueHeaderValue baggageItem))
{
if (string.IsNullOrEmpty(baggageItem.Name) || string.IsNullOrEmpty(baggageItem.Value))
{
continue;
}
if (baggageDictionary == null)
{
baggageDictionary = new Dictionary<string, string>();
}
baggageDictionary[baggageItem.Name] = baggageItem.Value;
}
}
}
baggage = baggageDictionary;
return baggageDictionary != null;
}
}
}
| 1 | 20,026 | This is very expensive (lots of allocations, lots of data copying). Could we do something like check IndexOf('%') and bypass if no hit? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -55,10 +55,17 @@ public class PvPUtil
public static boolean isAttackable(Client client, Player player)
{
int wildernessLevel = 0;
- if (!(client.getVar(Varbits.IN_WILDERNESS) == 1 || WorldType.isPvpWorld(client.getWorldType())))
+ if (!(client.getVar(Varbits.IN_WILDERNESS) == 1
+ || WorldType.isPvpWorld(client.getWorldType())
+ || client.getWorldType().contains(WorldType.DEADMAN)
+ ))
{
return false;
}
+ if (client.getWorldType().contains(WorldType.DEADMAN))
+ {
+ return true;
+ }
if (WorldType.isPvpWorld(client.getWorldType()))
{
if (client.getVar(Varbits.IN_WILDERNESS) != 1) | 1 | /*
* Copyright (c) 2019. PKLite - All Rights Reserved
* Unauthorized modification, distribution, or possession of this source file, via any medium is strictly prohibited.
* Proprietary and confidential. Refer to PKLite License file for more information on
* full terms of this copyright and to determine what constitutes authorized use.
* Written by PKLite(ST0NEWALL, others) <[email protected]>, 2019
*
*/
package net.runelite.client.util;
import java.util.Comparator;
import java.util.Objects;
import java.util.TreeMap;
import net.runelite.api.Client;
import net.runelite.api.InventoryID;
import net.runelite.api.Item;
import net.runelite.api.ItemDefinition;
import net.runelite.api.Player;
import net.runelite.api.Varbits;
import net.runelite.api.WorldType;
import net.runelite.api.coords.WorldPoint;
import net.runelite.client.game.ItemManager;
import org.apache.commons.lang3.ArrayUtils;
/**
*
*/
public class PvPUtil
{
/**
* Gets the wilderness level based on a world point
*
* @param point the point in the world to get the wilderness level for
* @return the int representing the wilderness level
*/
public static int getWildernessLevelFrom(WorldPoint point)
{
int y = point.getY();
int underLevel = ((y - 9920) / 8) + 1;
int upperLevel = ((y - 3520) / 8) + 1;
return y > 6400 ? underLevel : upperLevel;
}
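	/*
	 * Worked example (the coordinates are assumptions, not from the original
	 * source): a surface point with y = 3672 gives
	 * upperLevel = ((3672 - 3520) / 8) + 1 = 20, i.e. level-20 wilderness,
	 * while an underground point with y = 10000 gives
	 * underLevel = ((10000 - 9920) / 8) + 1 = 11.
	 */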
/**
	 * Determines if another player is attackable based on wilderness level and combat levels
*
* @param client The client of the local player
* @param player the player to determine attackability
	 * @return true if the player is attackable, false otherwise
*/
public static boolean isAttackable(Client client, Player player)
{
int wildernessLevel = 0;
if (!(client.getVar(Varbits.IN_WILDERNESS) == 1 || WorldType.isPvpWorld(client.getWorldType())))
{
return false;
}
if (WorldType.isPvpWorld(client.getWorldType()))
{
if (client.getVar(Varbits.IN_WILDERNESS) != 1)
{
return Math.abs(client.getLocalPlayer().getCombatLevel() - player.getCombatLevel()) <= 15;
}
wildernessLevel = 15;
}
return Math.abs(client.getLocalPlayer().getCombatLevel() - player.getCombatLevel())
< (getWildernessLevelFrom(client.getLocalPlayer().getWorldLocation()) + wildernessLevel);
}
public static int calculateRisk(Client client, ItemManager itemManager)
{
if (client.getItemContainer(InventoryID.EQUIPMENT) == null)
{
return 0;
}
if (client.getItemContainer(InventoryID.INVENTORY).getItems() == null)
{
return 0;
}
Item[] items = ArrayUtils.addAll(Objects.requireNonNull(client.getItemContainer(InventoryID.EQUIPMENT)).getItems(),
Objects.requireNonNull(client.getItemContainer(InventoryID.INVENTORY)).getItems());
TreeMap<Integer, Item> priceMap = new TreeMap<>(Comparator.comparingInt(Integer::intValue));
int wealth = 0;
for (Item i : items)
{
int value = (itemManager.getItemPrice(i.getId()) * i.getQuantity());
final ItemDefinition itemComposition = itemManager.getItemDefinition(i.getId());
if (!itemComposition.isTradeable() && value == 0)
{
value = itemComposition.getPrice() * i.getQuantity();
priceMap.put(value, i);
}
else
{
value = itemManager.getItemPrice(i.getId()) * i.getQuantity();
if (i.getId() > 0 && value > 0)
{
priceMap.put(value, i);
}
}
wealth += value;
}
return Integer.parseInt(QuantityFormatter.quantityToRSDecimalStack(priceMap.keySet().stream().mapToInt(Integer::intValue).sum()));
}
}
| 1 | 16,191 | This should be `WorldType.isDeadmanWorld(client.getWorldType())` to be inline with the other WorldType calls. | open-osrs-runelite | java |
@@ -600,11 +600,11 @@ class SABLRetinaHead(BaseDenseHead, BBoxTestMixin):
bbox_cls_pred.contiguous(),
bbox_reg_pred.contiguous()
]
- bboxes, confids = self.bbox_coder.decode(
+ bboxes, confidences = self.bbox_coder.decode(
anchors.contiguous(), bbox_preds, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
- mlvl_confids.append(confids)
+ mlvl_confids.append(confidences)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.core import (build_anchor_generator, build_assigner,
build_bbox_coder, build_sampler, images_to_levels,
multi_apply, multiclass_nms, unmap)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
from .guided_anchor_head import GuidedAnchorHead
@HEADS.register_module()
class SABLRetinaHead(BaseDenseHead, BBoxTestMixin):
"""Side-Aware Boundary Localization (SABL) for RetinaNet.
The anchor generation, assigning and sampling in SABLRetinaHead
are the same as GuidedAnchorHead for guided anchoring.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
num_classes (int): Number of classes.
in_channels (int): Number of channels in the input feature map.
stacked_convs (int): Number of Convs for classification \
and regression branches. Defaults to 4.
feat_channels (int): Number of hidden channels. \
Defaults to 256.
approx_anchor_generator (dict): Config dict for approx generator.
square_anchor_generator (dict): Config dict for square generator.
conv_cfg (dict): Config dict for ConvModule. Defaults to None.
norm_cfg (dict): Config dict for Norm Layer. Defaults to None.
bbox_coder (dict): Config dict for bbox coder.
reg_decoded_bbox (bool): If true, the regression loss would be
applied directly on decoded bounding boxes, converting both
the predicted boxes and regression targets to absolute
coordinates format. Default False. It should be `True` when
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
train_cfg (dict): Training config of SABLRetinaHead.
test_cfg (dict): Testing config of SABLRetinaHead.
loss_cls (dict): Config of classification loss.
loss_bbox_cls (dict): Config of classification loss for bbox branch.
loss_bbox_reg (dict): Config of regression loss for bbox branch.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
conv_cfg=None,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder',
num_buckets=14,
scale_factor=3.0),
reg_decoded_bbox=False,
train_cfg=None,
test_cfg=None,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01))):
super(SABLRetinaHead, self).__init__(init_cfg)
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.num_buckets = bbox_coder['num_buckets']
self.side_num = int(np.ceil(self.num_buckets / 2))
assert (approx_anchor_generator['octave_base_scale'] ==
square_anchor_generator['scales'][0])
assert (approx_anchor_generator['strides'] ==
square_anchor_generator['strides'])
self.approx_anchor_generator = build_anchor_generator(
approx_anchor_generator)
self.square_anchor_generator = build_anchor_generator(
square_anchor_generator)
self.approxs_per_octave = (
self.approx_anchor_generator.num_base_anchors[0])
# one anchor per location
self.num_anchors = 1
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reg_decoded_bbox = reg_decoded_bbox
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.sampling = loss_cls['type'] not in [
'FocalLoss', 'GHMC', 'QualityFocalLoss'
]
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox_cls = build_loss(loss_bbox_cls)
self.loss_bbox_reg = build_loss(loss_bbox_reg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.retina_bbox_reg = nn.Conv2d(
self.feat_channels, self.side_num * 4, 3, padding=1)
self.retina_bbox_cls = nn.Conv2d(
self.feat_channels, self.side_num * 4, 3, padding=1)
def forward_single(self, x):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_cls_pred = self.retina_bbox_cls(reg_feat)
bbox_reg_pred = self.retina_bbox_reg(reg_feat)
bbox_pred = (bbox_cls_pred, bbox_reg_pred)
return cls_score, bbox_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get squares according to feature map sizes and guided anchors.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
            device (torch.device | str): Device for returned tensors.
Returns:
tuple: square approxs of each image
"""
num_imgs = len(img_metas)
        # since feature map sizes of all images are the same, we only compute
        # squares once
multi_level_squares = self.square_anchor_generator.grid_anchors(
featmap_sizes, device=device)
squares_list = [multi_level_squares for _ in range(num_imgs)]
return squares_list
def get_target(self,
approx_list,
inside_flag_list,
square_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=None,
sampling=True,
unmap_outputs=True):
"""Compute bucketing targets.
Args:
approx_list (list[list]): Multi level approxs of each image.
inside_flag_list (list[list]): Multi level inside flags of each
image.
square_list (list[list]): Multi level squares of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
            gt_labels_list (list[Tensor]): Gt labels of each image.
label_channels (int): Channel of label.
            sampling (bool): Sample anchors or not.
            unmap_outputs (bool): Whether to map outputs back to the
                original set of anchors.
Returns:
tuple: Returns a tuple containing learning targets.
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each \
level.
- bbox_cls_targets_list (list[Tensor]): BBox cls targets of \
each level.
- bbox_cls_weights_list (list[Tensor]): BBox cls weights of \
each level.
- bbox_reg_targets_list (list[Tensor]): BBox reg targets of \
each level.
- bbox_reg_weights_list (list[Tensor]): BBox reg weights of \
each level.
- num_total_pos (int): Number of positive samples in all \
images.
- num_total_neg (int): Number of negative samples in all \
images.
"""
num_imgs = len(img_metas)
assert len(approx_list) == len(inside_flag_list) == len(
square_list) == num_imgs
# anchor number of multi levels
num_level_squares = [squares.size(0) for squares in square_list[0]]
# concat all level anchors and flags to a single tensor
inside_flag_flat_list = []
approx_flat_list = []
square_flat_list = []
for i in range(num_imgs):
assert len(square_list[i]) == len(inside_flag_list[i])
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
approx_flat_list.append(torch.cat(approx_list[i]))
square_flat_list.append(torch.cat(square_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_cls_targets,
all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,
pos_inds_list, neg_inds_list) = multi_apply(
self._get_target_single,
approx_flat_list,
inside_flag_flat_list,
square_flat_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_squares)
label_weights_list = images_to_levels(all_label_weights,
num_level_squares)
bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,
num_level_squares)
bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,
num_level_squares)
bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,
num_level_squares)
bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,
num_level_squares)
return (labels_list, label_weights_list, bbox_cls_targets_list,
bbox_cls_weights_list, bbox_reg_targets_list,
bbox_reg_weights_list, num_total_pos, num_total_neg)
def _get_target_single(self,
flat_approxs,
inside_flags,
flat_squares,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=None,
sampling=True,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in a
single image.
Args:
flat_approxs (Tensor): flat approxs of a single image,
shape (n, 4)
inside_flags (Tensor): inside flags of a single image,
shape (n, ).
flat_squares (Tensor): flat squares of a single image,
shape (approxs_per_octave * n, 4)
gt_bboxes (Tensor): Ground truth bboxes of a single image, \
shape (num_gts, 4).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
            sampling (bool): Sample anchors or not.
            unmap_outputs (bool): Whether to map outputs back to the
                original set of anchors.
Returns:
tuple:
                - labels (Tensor): Labels in a single image
                - label_weights (Tensor): Label weights in a single image
                - bbox_cls_targets (Tensor): BBox cls targets in a single image
                - bbox_cls_weights (Tensor): BBox cls weights in a single image
                - bbox_reg_targets (Tensor): BBox reg targets in a single image
                - bbox_reg_weights (Tensor): BBox reg weights in a single image
                - pos_inds (Tensor): Indices of positive anchors \
                    in a single image
                - neg_inds (Tensor): Indices of negative anchors \
                    in a single image
"""
if not inside_flags.any():
return (None, ) * 8
# assign gt and sample anchors
expand_inside_flags = inside_flags[:, None].expand(
-1, self.approxs_per_octave).reshape(-1)
approxs = flat_approxs[expand_inside_flags, :]
squares = flat_squares[inside_flags, :]
assign_result = self.assigner.assign(approxs, squares,
self.approxs_per_octave,
gt_bboxes, gt_bboxes_ignore)
sampling_result = self.sampler.sample(assign_result, squares,
gt_bboxes)
num_valid_squares = squares.shape[0]
bbox_cls_targets = squares.new_zeros(
(num_valid_squares, self.side_num * 4))
bbox_cls_weights = squares.new_zeros(
(num_valid_squares, self.side_num * 4))
bbox_reg_targets = squares.new_zeros(
(num_valid_squares, self.side_num * 4))
bbox_reg_weights = squares.new_zeros(
(num_valid_squares, self.side_num * 4))
labels = squares.new_full((num_valid_squares, ),
self.num_classes,
dtype=torch.long)
label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
(pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,
pos_bbox_cls_weights) = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets
bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets
bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights
bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_squares.size(0)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,
inside_flags)
bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,
inside_flags)
bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,
inside_flags)
bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,
inside_flags)
return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,
bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,
bbox_reg_weights, num_total_samples):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)
bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)
bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)
bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)
(bbox_cls_pred, bbox_reg_pred) = bbox_pred
bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(
-1, self.side_num * 4)
bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(
-1, self.side_num * 4)
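        # Bucket losses are normalized over all bucket predictions: 4 sides
        # times side_num buckets for the classification branch, and 4 sides
        # times offset_topk regressed buckets for the regression branch.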
loss_bbox_cls = self.loss_bbox_cls(
bbox_cls_pred,
bbox_cls_targets.long(),
bbox_cls_weights,
avg_factor=num_total_samples * 4 * self.side_num)
loss_bbox_reg = self.loss_bbox_reg(
bbox_reg_pred,
bbox_reg_targets,
bbox_reg_weights,
avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)
return loss_cls, loss_bbox_cls, loss_bbox_reg
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
device = cls_scores[0].device
# get sampled approxes
approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(
self, featmap_sizes, img_metas, device=device)
square_list = self.get_anchors(featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_target(
approxs_list,
inside_flag_list,
square_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_cls_targets_list,
bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_cls_targets_list,
bbox_cls_weights_list,
bbox_reg_targets_list,
bbox_reg_weights_list,
num_total_samples=num_total_samples)
return dict(
loss_cls=losses_cls,
loss_bbox_cls=losses_bbox_cls,
loss_bbox_reg=losses_bbox_reg)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=False):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
device = cls_scores[0].device
mlvl_anchors = self.get_anchors(
featmap_sizes, img_metas, device=device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_cls_pred_list = [
bbox_preds[i][0][img_id].detach() for i in range(num_levels)
]
bbox_reg_pred_list = [
bbox_preds[i][1][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list,
bbox_cls_pred_list,
bbox_reg_pred_list,
mlvl_anchors[img_id], img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_cls_preds,
bbox_reg_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
cfg = self.test_cfg if cfg is None else cfg
mlvl_bboxes = []
mlvl_scores = []
mlvl_confids = []
assert len(cls_scores) == len(bbox_cls_preds) == len(
bbox_reg_preds) == len(mlvl_anchors)
for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(
cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_cls_pred.size(
            )[-2:] == bbox_reg_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(
-1, self.side_num * 4)
bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(
-1, self.side_num * 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_cls_pred = bbox_cls_pred[topk_inds, :]
bbox_reg_pred = bbox_reg_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_preds = [
bbox_cls_pred.contiguous(),
bbox_reg_pred.contiguous()
]
bboxes, confids = self.bbox_coder.decode(
anchors.contiguous(), bbox_preds, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_confids.append(confids)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
mlvl_confids = torch.cat(mlvl_confids)
if self.use_sigmoid_cls:
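            # Add a dummy background-class column so multiclass_nms sees the
            # same score layout as in the softmax case.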
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_confids)
return det_bboxes, det_labels
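# Illustrative sketch (not part of the original file): how the bucketing
# hyper-parameters above translate into prediction-channel counts. The values
# simply mirror the defaults of this head and are shown for demonstration
# only.
def _sabl_channel_example(num_buckets=14):
    side_num = int(np.ceil(num_buckets / 2))
    # retina_bbox_cls and retina_bbox_reg each predict side_num * 4 channels:
    # one value per bucket on each of the four box sides.
    return side_num, side_num * 4  # (7, 28) with the defaults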
| 1 | 25,917 | do we also need to change `mlvl_confid` -> `mlvl_confidences`> | open-mmlab-mmdetection | py |
@@ -125,7 +125,8 @@ type Config struct {
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
- InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
+ InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
+ InterfaceExclude string `config:"iface-list;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"` | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/api"
"github.com/projectcalico/libcalico-go/lib/client"
)
var (
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv2);etcdv2;non-zero,die-on-fail"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;"`
TyphaK8sServiceName string `config:"string;"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero"`
TyphaReadTimeout time.Duration `config:"seconds;30"`
TyphaWriteTimeout time.Duration `config:"seconds;10"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xff000000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;tcp:2379,tcp:2380,tcp:4001,tcp:7001,udp:53,udp:67;die-on-fail"`
UsageReportingEnabled bool `config:"bool;true"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
DebugMemoryProfilePath string `config:"file;;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
numIptablesBitsAllocated int
}
type ProtoPort struct {
Protocol string
Port uint16
}
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
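// Illustrative sketch (not part of the original file): merging raw values
// from an environment-variable source. The InterfacePrefix value here is an
// assumption chosen purely for demonstration.
func exampleUpdateFrom() {
	cfg := New()
	changed, err := cfg.UpdateFrom(
		map[string]string{"InterfacePrefix": "cali,tap"}, EnvironmentVariable)
	if err == nil && changed {
		_ = cfg.InterfacePrefixes() // []string{"cali", "tap"}
	}
}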
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) NextIptablesMark() uint32 {
mark := config.NthIPTablesMark(config.numIptablesBitsAllocated)
config.numIptablesBitsAllocated++
return mark
}
func (config *Config) NthIPTablesMark(n int) uint32 {
numBitsFound := 0
for shift := uint(0); shift < 32; shift++ {
candidate := uint32(1) << shift
if config.IptablesMarkMask&candidate > 0 {
if numBitsFound == n {
return candidate
}
			numBitsFound++
}
}
log.WithFields(log.Fields{
"IptablesMarkMask": config.IptablesMarkMask,
"requestedMark": n,
}).Panic("Not enough iptables mark bits available.")
return 0
}
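// Illustrative sketch (not part of the original file): with the default
// IptablesMarkMask of 0xff000000, successive NextIptablesMark() calls hand
// out one bit of the mask at a time, starting from the lowest set bit.
func exampleMarkAllocation() {
	c := New()
	first := c.NextIptablesMark()  // 0x01000000 with the default mask
	second := c.NextIptablesMark() // 0x02000000 with the default mask
	_, _ = first, second
}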
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) DatastoreConfig() api.CalicoAPIConfig {
// Special case for etcdv2 datastore, where we want to honour established Felix-specific
// config mechanisms.
if config.DatastoreType == "etcdv2" {
// Build a CalicoAPIConfig with the etcd fields filled in from Felix-specific
// config.
var etcdEndpoints string
if len(config.EtcdEndpoints) == 0 {
etcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
} else {
etcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
}
etcdCfg := api.EtcdConfig{
EtcdEndpoints: etcdEndpoints,
EtcdKeyFile: config.EtcdKeyFile,
EtcdCertFile: config.EtcdCertFile,
EtcdCACertFile: config.EtcdCaFile,
}
return api.CalicoAPIConfig{
Spec: api.CalicoAPIConfigSpec{
DatastoreType: api.EtcdV2,
EtcdConfig: etcdCfg,
},
}
}
// Build CalicoAPIConfig from the environment. This means that any XxxYyy field in
// CalicoAPIConfigSpec can be set by a corresponding XXX_YYY or CALICO_XXX_YYY environment
// variable, and that the datastore type can be set by a DATASTORE_TYPE or
// CALICO_DATASTORE_TYPE variable. (Except in the etcdv2 case which is handled specially
// above.)
cfg, err := client.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// If that didn't set the datastore type (in which case the field will have been set to its
// default 'etcdv2' value), copy it from the Felix config.
if cfg.Spec.DatastoreType == "etcdv2" {
cfg.Spec.DatastoreType = api.DatastoreType(config.DatastoreType)
}
if !config.IpInIpEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("IPIP disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv2" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
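	// For example, the tag "int(0,65535);8775;die-on-fail" (used by
	// MetadataPort above) splits into kind "int", kindParams "0,65535",
	// default "8775" and flags "die-on-fail".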
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := os.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = os.Getenv("HOSTNAME")
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 15,810 | I feel slightly that InterfaceExclude is not a clear name - bearing in mind that our config names are, to some extent, an external API. From an external point of view, a clearer name might be IPVSInterfaces. Then it would obviously make sense for the value to be something like 'kube-ipvs0', and it would be a matter of Felix's internal implementation that we choose to exclude those interfaces from our monitoring. Alternatively, if we want to keep this at a level that is more general than just IPVS, perhaps 'UnmonitoredInterfaces'? WDYT? | projectcalico-felix | go |
@@ -180,7 +180,7 @@ def transform_path(path):
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
- if re.match(r'[A-Z]:[^\\]', path, re.IGNORECASE):
+ if re.fullmatch(r'[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82 | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Shared QtWebKit/QtWebEngine code for downloads."""
import re
import sys
import html
import os.path
import collections
import functools
import pathlib
import tempfile
import enum
import sip
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QObject, QModelIndex,
QTimer, QAbstractListModel)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.config import config
from qutebrowser.utils import (usertypes, standarddir, utils, message, log,
qtutils)
ModelRole = enum.IntEnum('ModelRole', ['item'], start=Qt.UserRole)
# Remember the last used directory
last_used_directory = None
# All REFRESH_INTERVAL milliseconds, speeds will be recalculated and downloads
# redrawn.
_REFRESH_INTERVAL = 500
class UnsupportedAttribute:
"""Class which is used to create attributes which are not supported.
This is used for attributes like "fileobj" for downloads which are not
supported with QtWebengine.
"""
pass
class UnsupportedOperationError(Exception):
"""Raised when an operation is not supported with the given backend."""
def download_dir():
"""Get the download directory to use."""
directory = config.val.downloads.location.directory
remember_dir = config.val.downloads.location.remember
if remember_dir and last_used_directory is not None:
ddir = last_used_directory
elif directory is None:
ddir = standarddir.download()
else:
ddir = directory
try:
os.makedirs(ddir)
except FileExistsError:
pass
return ddir
def immediate_download_path(prompt_download_directory=None):
"""Try to get an immediate download path without asking the user.
If that's possible, we return a path immediately. If not, None is returned.
Args:
        prompt_download_directory: If this is something other than None, it
                                   will override the
                                   downloads.location.prompt setting.
"""
if prompt_download_directory is None:
prompt_download_directory = config.val.downloads.location.prompt
if not prompt_download_directory:
return download_dir()
def _path_suggestion(filename):
"""Get the suggested file path.
Args:
filename: The filename to use if included in the suggestion.
"""
suggestion = config.val.downloads.location.suggestion
if suggestion == 'path':
# add trailing '/' if not present
return os.path.join(download_dir(), '')
elif suggestion == 'filename':
return filename
elif suggestion == 'both':
return os.path.join(download_dir(), filename)
else: # pragma: no cover
raise ValueError("Invalid suggestion value {}!".format(suggestion))
def create_full_filename(basename, filename):
"""Create a full filename based on the given basename and filename.
Args:
basename: The basename to use if filename is a directory.
filename: The path to a folder or file where you want to save.
Return:
The full absolute path, or None if filename creation was not possible.
"""
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/qutebrowser/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
filename = utils.force_encoding(filename, encoding)
basename = utils.force_encoding(basename, encoding)
if os.path.isabs(filename) and (os.path.isdir(filename) or
filename.endswith(os.sep)):
# We got an absolute directory from the user, so we save it under
# the default filename in that directory.
return os.path.join(filename, basename)
elif os.path.isabs(filename):
# We got an absolute filename from the user, so we save it under
# that filename.
return filename
return None
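def _create_full_filename_examples():
    # Illustrative sketch (not part of the original module); the '/tmp' paths
    # are assumptions and only hold on POSIX-like systems.
    assert create_full_filename('page.html', '/tmp/') == '/tmp/page.html'
    assert create_full_filename('page.html', '/tmp/new.html') == '/tmp/new.html'
    assert create_full_filename('page.html', 'relative.html') is None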
def get_filename_question(*, suggested_filename, url, parent=None):
"""Get a Question object for a download-path.
Args:
suggested_filename: The "default"-name that is pre-entered as path.
url: The URL the download originated from.
parent: The parent of the question (a QObject).
"""
encoding = sys.getfilesystemencoding()
suggested_filename = utils.force_encoding(suggested_filename, encoding)
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename)
return q
def transform_path(path):
r"""Do platform-specific transformations, like changing E: to E:\.
Returns None if the path is invalid on the current platform.
"""
if not utils.is_windows:
return path
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
if re.match(r'[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82
if pathlib.Path(path).is_reserved():
return None
return path
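def _transform_path_examples():
    # Illustrative sketch (not part of the original module). On non-Windows
    # platforms the path is passed through unchanged; on Windows a bare drive
    # like 'E:' is expanded to 'E:\\' and reserved names such as 'COM1' are
    # rejected with None.
    if not utils.is_windows:
        assert transform_path('/tmp/foo.txt') == '/tmp/foo.txt'
    else:
        assert transform_path('COM1') is None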
def suggested_fn_from_title(url_path, title=None):
"""Suggest a filename depending on the URL extension and page title.
Args:
url_path: a string with the URL path
title: the page title string
Return:
The download filename based on the title, or None if the extension is
not found in the whitelist (or if there is no page title).
"""
ext_whitelist = [".html", ".htm", ".php", ""]
_, ext = os.path.splitext(url_path)
if ext.lower() in ext_whitelist and title:
suggested_fn = utils.sanitize_filename(title)
if not suggested_fn.lower().endswith((".html", ".htm")):
suggested_fn += ".html"
else:
suggested_fn = None
return suggested_fn
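def _suggested_fn_from_title_examples():
    # Illustrative sketch (not part of the original module), assuming
    # utils.sanitize_filename leaves a plain ASCII title unchanged.
    assert suggested_fn_from_title('/index.php', 'Hello') == 'Hello.html'
    assert suggested_fn_from_title('/file.pdf', 'Hello') is None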
class NoFilenameError(Exception):
"""Raised when we can't find out a filename in DownloadTarget."""
# Where a download should be saved
class _DownloadTarget:
"""Abstract base class for different download targets."""
def __init__(self):
raise NotImplementedError
def suggested_filename(self):
"""Get the suggested filename for this download target."""
raise NotImplementedError
class FileDownloadTarget(_DownloadTarget):
"""Save the download to the given file.
Attributes:
filename: Filename where the download should be saved.
"""
def __init__(self, filename):
# pylint: disable=super-init-not-called
self.filename = filename
def suggested_filename(self):
return os.path.basename(self.filename)
def __str__(self):
return self.filename
class FileObjDownloadTarget(_DownloadTarget):
"""Save the download to the given file-like object.
Attributes:
fileobj: File-like object where the download should be written to.
"""
def __init__(self, fileobj):
# pylint: disable=super-init-not-called
self.fileobj = fileobj
def suggested_filename(self):
try:
return self.fileobj.name
except AttributeError:
raise NoFilenameError
def __str__(self):
try:
return 'file object at {}'.format(self.fileobj.name)
except AttributeError:
return 'anonymous file object'
class OpenFileDownloadTarget(_DownloadTarget):
"""Save the download in a temp dir and directly open it.
Attributes:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default application.
If no `{}` is found, the filename is appended to the cmdline.
"""
def __init__(self, cmdline=None):
# pylint: disable=super-init-not-called
self.cmdline = cmdline
def suggested_filename(self):
raise NoFilenameError
def __str__(self):
return 'temporary file'
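def _download_target_examples():
    # Illustrative sketch (not part of the original module) of the target
    # types dispatched in AbstractDownloadItem.set_target(); the path is an
    # assumption chosen for demonstration.
    file_target = FileDownloadTarget('/tmp/page.html')
    open_target = OpenFileDownloadTarget(cmdline=None)
    assert file_target.suggested_filename() == 'page.html'
    assert str(open_target) == 'temporary file'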
class DownloadItemStats(QObject):
"""Statistics (bytes done, total bytes, time, etc.) about a download.
Class attributes:
SPEED_AVG_WINDOW: How many seconds of speed data to average to
estimate the remaining time.
Attributes:
        done: How many bytes have already been downloaded.
total: The total count of bytes. None if the total is unknown.
speed: The current download speed, in bytes per second.
_speed_avg: A rolling average of speeds.
        _last_done: The count of bytes which were downloaded when calculating
the speed the last time.
"""
SPEED_AVG_WINDOW = 30
def __init__(self, parent=None):
super().__init__(parent)
self.total = None
self.done = 0
self.speed = 0
self._last_done = 0
samples = int(self.SPEED_AVG_WINDOW * (1000 / _REFRESH_INTERVAL))
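        # With the defaults (30 s window, 500 ms refresh interval) this keeps
        # the last 60 speed samples for the rolling average.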
self._speed_avg = collections.deque(maxlen=samples)
def update_speed(self):
"""Recalculate the current download speed.
        The caller needs to guarantee this is called every _REFRESH_INTERVAL ms.
"""
if self.done is None:
# this can happen for very fast downloads, e.g. when actually
# opening a file
return
delta = self.done - self._last_done
self.speed = delta * 1000 / _REFRESH_INTERVAL
self._speed_avg.append(self.speed)
self._last_done = self.done
def finish(self):
"""Set the download stats as finished."""
self.done = self.total
def percentage(self):
"""The current download percentage, or None if unknown."""
if self.done == self.total:
return 100
elif self.total == 0 or self.total is None:
return None
else:
return 100 * self.done / self.total
def remaining_time(self):
"""The remaining download time in seconds, or None."""
if self.total is None or not self._speed_avg:
# No average yet or we don't know the total size.
return None
remaining_bytes = self.total - self.done
avg = sum(self._speed_avg) / len(self._speed_avg)
if avg == 0:
# Download stalled
return None
else:
return remaining_bytes / avg
@pyqtSlot('qint64', 'qint64')
def on_download_progress(self, bytes_done, bytes_total):
"""Update local variables when the download progress changed.
Args:
bytes_done: How many bytes are downloaded.
bytes_total: How many bytes there are to download in total.
"""
if bytes_total in [0, -1]: # QtWebEngine, QtWebKit
bytes_total = None
self.done = bytes_done
self.total = bytes_total
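def _download_stats_example():
    # Illustrative sketch (not part of the original module): feeding progress
    # into DownloadItemStats by hand; real updates come from the download
    # backends.
    stats = DownloadItemStats()
    stats.on_download_progress(512, 2048)
    stats.update_speed()
    return stats.percentage()  # 25.0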
class AbstractDownloadItem(QObject):
"""Shared QtNetwork/QtWebEngine part of a download item.
Attributes:
done: Whether the download is finished.
stats: A DownloadItemStats object.
index: The index of the download in the view.
successful: Whether the download has completed successfully.
error_msg: The current error message, or None
fileobj: The file object to download the file to.
raw_headers: The headers sent by the server.
_filename: The filename of the download.
_dead: Whether the Download has _die()'d.
Signals:
data_changed: The downloads metadata changed.
finished: The download was finished.
cancelled: The download was cancelled.
error: An error with the download occurred.
arg: The error message as string.
remove_requested: Emitted when the removal of this download was
requested.
"""
data_changed = pyqtSignal()
finished = pyqtSignal()
error = pyqtSignal(str)
cancelled = pyqtSignal()
remove_requested = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.done = False
self.stats = DownloadItemStats(self)
self.index = 0
self.error_msg = None
self.basename = '???'
self.successful = False
self.fileobj = UnsupportedAttribute()
self.raw_headers = UnsupportedAttribute()
self._filename = None
self._dead = False
def __repr__(self):
return utils.get_repr(self, basename=self.basename)
def __str__(self):
"""Get the download as a string.
Example: foo.pdf [699.2kB/s|0.34|16%|4.253/25.124]
"""
speed = utils.format_size(self.stats.speed, suffix='B/s')
down = utils.format_size(self.stats.done, suffix='B')
perc = self.stats.percentage()
remaining = self.stats.remaining_time()
if self.error_msg is None:
errmsg = ""
else:
errmsg = " - {}".format(self.error_msg)
if all(e is None for e in [perc, remaining, self.stats.total]):
return ('{index}: {name} [{speed:>10}|{down}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
down=down, errmsg=errmsg))
perc = round(perc)
if remaining is None:
remaining = '?'
else:
remaining = utils.format_seconds(remaining)
total = utils.format_size(self.stats.total, suffix='B')
if self.done:
return ('{index}: {name} [{perc:>2}%|{total}]{errmsg}'.format(
index=self.index, name=self.basename, perc=perc,
total=total, errmsg=errmsg))
else:
return ('{index}: {name} [{speed:>10}|{remaining:>5}|{perc:>2}%|'
'{down}/{total}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
remaining=remaining, perc=perc, down=down,
total=total, errmsg=errmsg))
def _do_die(self):
"""Do cleanup steps after a download has died."""
raise NotImplementedError
def _die(self, msg):
"""Abort the download and emit an error."""
assert not self.successful
# Prevent actions if calling _die() twice.
#
# For QtWebKit, this might happen if the error handler correctly
# connects, and the error occurs in _init_reply between
# reply.error.connect and the reply.error() check. In this case, the
# connected error handlers will be called twice, once via the direct
# error.emit() and once here in _die(). The stacks look like this then:
#
# <networkmanager error.emit> -> on_reply_error -> _die ->
# self.error.emit()
#
# and
#
# [_init_reply -> <single shot timer> ->] <lambda in _init_reply> ->
# self.error.emit()
#
# which may lead to duplicate error messages (and failing tests)
if self._dead:
return
self._dead = True
self._do_die()
self.error_msg = msg
self.stats.finish()
self.error.emit(msg)
self.done = True
self.data_changed.emit()
def get_status_color(self, position):
"""Choose an appropriate color for presenting the download's status.
Args:
position: The color type requested, can be 'fg' or 'bg'.
"""
assert position in ["fg", "bg"]
# pylint: disable=bad-config-option
start = getattr(config.val.colors.downloads.start, position)
stop = getattr(config.val.colors.downloads.stop, position)
system = getattr(config.val.colors.downloads.system, position)
error = getattr(config.val.colors.downloads.error, position)
# pylint: enable=bad-config-option
if self.error_msg is not None:
assert not self.successful
return error
elif self.stats.percentage() is None:
return start
else:
return utils.interpolate_color(start, stop,
self.stats.percentage(), system)
def _do_cancel(self):
"""Actual cancel implementation."""
raise NotImplementedError
@pyqtSlot()
def cancel(self, *, remove_data=True):
"""Cancel the download.
Args:
remove_data: Whether to remove the downloaded data.
"""
self._do_cancel()
log.downloads.debug("cancelled")
if remove_data:
self.delete()
self.done = True
self.finished.emit()
self.data_changed.emit()
@pyqtSlot()
def remove(self):
"""Remove the download from the model."""
self.remove_requested.emit()
def delete(self):
"""Delete the downloaded file."""
try:
if self._filename is not None and os.path.exists(self._filename):
os.remove(self._filename)
log.downloads.debug("Deleted {}".format(self._filename))
else:
log.downloads.debug("Not deleting {}".format(self._filename))
except OSError:
log.downloads.exception("Failed to remove partial file")
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
raise NotImplementedError
@pyqtSlot()
def try_retry(self):
"""Try to retry a download and show an error if it's unsupported."""
try:
self.retry()
except UnsupportedOperationError as e:
message.error(str(e))
def _get_open_filename(self):
"""Get the filename to open a download.
Returns None if no suitable filename was found.
"""
raise NotImplementedError
@pyqtSlot()
def open_file(self, cmdline=None):
"""Open the downloaded file.
Args:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default
application or `downloads.open_dispatcher` if set. If no
`{}` is found, the filename is appended to the cmdline.
"""
assert self.successful
filename = self._get_open_filename()
if filename is None: # pragma: no cover
log.downloads.error("No filename to open the download!")
return
        # By using a single-shot timer, we ensure that we return fast. This
        # is important on systems where process creation takes a long time,
        # as otherwise the prompt might hang around and cause bugs
        # (see issue #2296)
QTimer.singleShot(0, lambda: utils.open_file(filename, cmdline))
def _ensure_can_set_filename(self, filename):
"""Make sure we can still set a filename."""
raise NotImplementedError
def _after_set_filename(self):
"""Finish initialization based on self._filename."""
raise NotImplementedError
def _ask_confirm_question(self, title, msg):
"""Ask a confirmation question for the download."""
raise NotImplementedError
def _ask_create_parent_question(self, title, msg,
force_overwrite, remember_directory):
"""Ask a confirmation question for the parent directory."""
raise NotImplementedError
def _set_fileobj(self, fileobj, *, autoclose=True):
"""Set a file object to save the download to.
Not supported by QtWebEngine.
Args:
fileobj: The file object to download to.
autoclose: Close the file object automatically when it's done.
"""
raise NotImplementedError
def _set_tempfile(self, fileobj):
"""Set a temporary file when opening the download."""
raise NotImplementedError
def _set_filename(self, filename, *, force_overwrite=False,
remember_directory=True):
"""Set the filename to save the download to.
Args:
filename: The full filename to save the download to.
None: special value to stop the download.
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
filename = os.path.expanduser(filename)
self._ensure_can_set_filename(filename)
self._filename = create_full_filename(self.basename, filename)
if self._filename is None:
# We only got a filename (without directory) or a relative path
# from the user, so we append that to the default directory and
# try again.
self._filename = create_full_filename(
self.basename, os.path.join(download_dir(), filename))
            # At this point, we have a misconfigured XDG_DOWNLOAD_DIR, as
            # download_dir() + filename is still not an absolute path.
            # The config value is checked for "absoluteness", but
            # ~/.config/user-dirs.dirs may be misconfigured and a non-absolute
            # path may be set for XDG_DOWNLOAD_DIR.
if self._filename is None:
message.error(
"XDG_DOWNLOAD_DIR points to a relative path - please check"
" your ~/.config/user-dirs.dirs. The download is saved in"
" your home directory.",
)
# fall back to $HOME as download_dir
self._filename = create_full_filename(self.basename,
os.path.expanduser('~'))
dirname = os.path.dirname(self._filename)
if not os.path.exists(dirname):
txt = ("<b>{}</b> does not exist. Create it?".
format(html.escape(
os.path.join(dirname, ""))))
self._ask_create_parent_question("Create directory?", txt,
force_overwrite,
remember_directory)
else:
self._after_create_parent_question(force_overwrite,
remember_directory)
def _after_create_parent_question(self,
force_overwrite, remember_directory):
"""After asking about parent directory.
Args:
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
global last_used_directory
try:
os.makedirs(os.path.dirname(self._filename))
except FileExistsError:
pass
except OSError as e:
self._die(e.strerror)
self.basename = os.path.basename(self._filename)
if remember_directory:
last_used_directory = os.path.dirname(self._filename)
log.downloads.debug("Setting filename to {}".format(self._filename))
if force_overwrite:
self._after_set_filename()
elif os.path.isfile(self._filename):
# The file already exists, so ask the user if it should be
# overwritten.
txt = "<b>{}</b> already exists. Overwrite?".format(
html.escape(self._filename))
self._ask_confirm_question("Overwrite existing file?", txt)
# FIFO, device node, etc. Make sure we want to do this
elif (os.path.exists(self._filename) and
not os.path.isdir(self._filename)):
txt = ("<b>{}</b> already exists and is a special file. Write to "
"it anyways?".format(html.escape(self._filename)))
self._ask_confirm_question("Overwrite special file?", txt)
else:
self._after_set_filename()
def _open_if_successful(self, cmdline):
"""Open the downloaded file, but only if it was successful.
Args:
cmdline: Passed to DownloadItem.open_file().
"""
if not self.successful:
log.downloads.debug("{} finished but not successful, not opening!"
.format(self))
return
self.open_file(cmdline)
def set_target(self, target):
"""Set the target for a given download.
Args:
target: The DownloadTarget for this download.
"""
if isinstance(target, FileObjDownloadTarget):
self._set_fileobj(target.fileobj, autoclose=False)
elif isinstance(target, FileDownloadTarget):
self._set_filename(target.filename)
elif isinstance(target, OpenFileDownloadTarget):
try:
fobj = temp_download_manager.get_tmpfile(self.basename)
except OSError as exc:
msg = "Download error: {}".format(exc)
message.error(msg)
self.cancel()
return
self.finished.connect(
functools.partial(self._open_if_successful, target.cmdline))
self._set_tempfile(fobj)
else: # pragma: no cover
raise ValueError("Unsupported download target: {}".format(target))
class AbstractDownloadManager(QObject):
"""Backend-independent download manager code.
Attributes:
downloads: A list of active DownloadItems.
_networkmanager: A NetworkManager for generic downloads.
Signals:
begin_remove_row: Emitted before downloads are removed.
end_remove_row: Emitted after downloads are removed.
begin_insert_row: Emitted before downloads are inserted.
end_insert_row: Emitted after downloads are inserted.
data_changed: Emitted when the data of the model changed.
The argument is the index of the changed download
"""
begin_remove_row = pyqtSignal(int)
end_remove_row = pyqtSignal()
begin_insert_row = pyqtSignal(int)
end_insert_row = pyqtSignal()
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.downloads = []
self._update_timer = usertypes.Timer(self, 'download-update')
self._update_timer.timeout.connect(self._update_gui)
self._update_timer.setInterval(_REFRESH_INTERVAL)
def __repr__(self):
return utils.get_repr(self, downloads=len(self.downloads))
@pyqtSlot()
def _update_gui(self):
"""Periodical GUI update of all items."""
assert self.downloads
for dl in self.downloads:
dl.stats.update_speed()
self.data_changed.emit(-1)
def _init_item(self, download, auto_remove, suggested_filename):
"""Initialize a newly created DownloadItem."""
download.cancelled.connect(download.remove)
download.remove_requested.connect(functools.partial(
self._remove_item, download))
delay = config.val.downloads.remove_finished
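        # downloads.remove_finished > -1 means finished downloads are removed
        # automatically after that many milliseconds; otherwise fall back to
        # the auto_remove flag.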
if delay > -1:
download.finished.connect(
lambda: QTimer.singleShot(delay, download.remove))
elif auto_remove:
download.finished.connect(download.remove)
download.data_changed.connect(
functools.partial(self._on_data_changed, download))
download.error.connect(self._on_error)
download.basename = suggested_filename
idx = len(self.downloads)
download.index = idx + 1 # "Human readable" index
self.begin_insert_row.emit(idx)
self.downloads.append(download)
self.end_insert_row.emit()
if not self._update_timer.isActive():
self._update_timer.start()
@pyqtSlot(AbstractDownloadItem)
def _on_data_changed(self, download):
"""Emit data_changed signal when download data changed."""
try:
idx = self.downloads.index(download)
except ValueError:
# download has been deleted in the meantime
return
self.data_changed.emit(idx)
@pyqtSlot(str)
def _on_error(self, msg):
"""Display error message on download errors."""
message.error("Download error: {}".format(msg))
@pyqtSlot(AbstractDownloadItem)
def _remove_item(self, download):
"""Remove a given download."""
if sip.isdeleted(self):
# https://github.com/qutebrowser/qutebrowser/issues/1242
return
try:
idx = self.downloads.index(download)
except ValueError:
# already removed
return
self.begin_remove_row.emit(idx)
del self.downloads[idx]
self.end_remove_row.emit()
download.deleteLater()
self._update_indexes()
if not self.downloads:
self._update_timer.stop()
log.downloads.debug("Removed download {}".format(download))
def _update_indexes(self):
"""Update indexes of all DownloadItems."""
for i, d in enumerate(self.downloads, 1):
d.index = i
self.data_changed.emit(-1)
def _init_filename_question(self, question, download):
"""Set up an existing filename question with a download."""
question.answered.connect(download.set_target)
question.cancelled.connect(download.cancel)
download.cancelled.connect(question.abort)
download.error.connect(question.abort)
class DownloadModel(QAbstractListModel):
"""A list model showing downloads."""
def __init__(self, qtnetwork_manager, webengine_manager=None, parent=None):
super().__init__(parent)
self._qtnetwork_manager = qtnetwork_manager
self._webengine_manager = webengine_manager
qtnetwork_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=False))
qtnetwork_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=False))
qtnetwork_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=False))
qtnetwork_manager.end_insert_row.connect(self.endInsertRows)
qtnetwork_manager.end_remove_row.connect(self.endRemoveRows)
if webengine_manager is not None:
webengine_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=True))
webengine_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=True))
webengine_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=True))
webengine_manager.end_insert_row.connect(self.endInsertRows)
webengine_manager.end_remove_row.connect(self.endRemoveRows)
def _all_downloads(self):
"""Combine downloads from both downloaders."""
if self._webengine_manager is None:
return self._qtnetwork_manager.downloads[:]
else:
return (self._qtnetwork_manager.downloads +
self._webengine_manager.downloads)
def __len__(self):
return len(self._all_downloads())
def __iter__(self):
return iter(self._all_downloads())
def __getitem__(self, idx):
return self._all_downloads()[idx]
def _on_begin_insert_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_insert_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginInsertRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginInsertRows(QModelIndex(), idx, idx)
def _on_begin_remove_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_remove_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginRemoveRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginRemoveRows(QModelIndex(), idx, idx)
def _on_data_changed(self, idx, *, webengine):
"""Called when a downloader's data changed.
Args:
            idx: The changed index as int, or -1 for all indices.
webengine: If given, the QtNetwork download length is added to the
index.
"""
if idx == -1:
start_index = self.index(0, 0)
end_index = self.last_index()
else:
if webengine:
idx += len(self._qtnetwork_manager.downloads)
start_index = self.index(idx, 0)
end_index = self.index(idx, 0)
qtutils.ensure_valid(start_index)
qtutils.ensure_valid(end_index)
self.dataChanged.emit(start_index, end_index)
def _raise_no_download(self, count):
"""Raise an exception that the download doesn't exist.
Args:
count: The index of the download
"""
if not count:
raise cmdexc.CommandError("There's no download!")
raise cmdexc.CommandError("There's no download {}!".format(count))
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_cancel(self, all_=False, count=0):
"""Cancel the last/[count]th download.
Args:
all_: Cancel all running downloads
count: The index of the download to cancel.
"""
downloads = self._all_downloads()
if all_:
for download in downloads:
if not download.done:
download.cancel()
else:
try:
download = downloads[count - 1]
except IndexError:
self._raise_no_download(count)
if download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is already done!"
.format(count))
download.cancel()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_delete(self, count=0):
"""Delete the last/[count]th download from disk.
Args:
count: The index of the download to delete.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.delete()
download.remove()
log.downloads.debug("deleted download {}".format(download))
@cmdutils.register(instance='download-model', scope='window', maxsplit=0)
@cmdutils.argument('count', count=True)
def download_open(self, cmdline: str = None, count=0):
"""Open the last/[count]th download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
count: The index of the download to open.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.open_file(cmdline)
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_retry(self, count=0):
"""Retry the first failed/[count]th download.
Args:
count: The index of the download to retry.
"""
if count:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if download.successful or not download.done:
raise cmdexc.CommandError("Download {} did not fail!".format(
count))
else:
to_retry = [d for d in self if d.done and not d.successful]
if not to_retry:
raise cmdexc.CommandError("No failed downloads!")
else:
download = to_retry[0]
download.try_retry()
def can_clear(self):
"""Check if there are finished downloads to clear."""
return any(download.done for download in self)
@cmdutils.register(instance='download-model', scope='window')
def download_clear(self):
"""Remove all finished downloads from the list."""
for download in self:
if download.done:
download.remove()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_remove(self, all_=False, count=0):
"""Remove the last/[count]th download from the list.
Args:
all_: Remove all finished downloads.
count: The index of the download to remove.
"""
if all_:
self.download_clear()
else:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!"
.format(count))
download.remove()
def running_downloads(self):
"""Return the amount of still running downloads.
Return:
The number of unfinished downloads.
"""
return sum(1 for download in self if not download.done)
def last_index(self):
"""Get the last index in the model.
Return:
A (possibly invalid) QModelIndex.
"""
idx = self.index(self.rowCount() - 1)
return idx
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Simple constant header."""
if (section == 0 and orientation == Qt.Horizontal and
role == Qt.DisplayRole):
return "Downloads"
else:
return ""
def data(self, index, role):
"""Download data from DownloadManager."""
if not index.isValid():
return None
if index.parent().isValid() or index.column() != 0:
return None
item = self[index.row()]
if role == Qt.DisplayRole:
data = str(item)
elif role == Qt.ForegroundRole:
data = item.get_status_color('fg')
elif role == Qt.BackgroundRole:
data = item.get_status_color('bg')
elif role == ModelRole.item:
data = item
elif role == Qt.ToolTipRole:
if item.error_msg is None:
data = None
else:
return item.error_msg
else:
data = None
return data
def flags(self, index):
"""Override flags so items aren't selectable.
The default would be Qt.ItemIsEnabled | Qt.ItemIsSelectable.
"""
if not index.isValid():
return Qt.ItemFlags()
return Qt.ItemIsEnabled | Qt.ItemNeverHasChildren
def rowCount(self, parent=QModelIndex()):
"""Get count of active downloads."""
if parent.isValid():
# We don't have children
return 0
return len(self)
class TempDownloadManager:
"""Manager to handle temporary download files.
    The downloads are downloaded to a temporary location and then opened with
    the system standard application. The temporary files are deleted when
    qutebrowser is shut down.
Attributes:
files: A list of NamedTemporaryFiles of downloaded items.
"""
def __init__(self):
self.files = []
self._tmpdir = None
def cleanup(self):
"""Clean up any temporary files."""
if self._tmpdir is not None:
try:
self._tmpdir.cleanup()
except OSError:
log.misc.exception("Failed to clean up temporary download "
"directory")
self._tmpdir = None
def _get_tmpdir(self):
"""Return the temporary directory that is used for downloads.
The directory is created lazily on first access.
Return:
The tempfile.TemporaryDirectory that is used.
"""
if self._tmpdir is None:
self._tmpdir = tempfile.TemporaryDirectory(
prefix='qutebrowser-downloads-')
return self._tmpdir
def get_tmpfile(self, suggested_name):
"""Return a temporary file in the temporary downloads directory.
The files are kept as long as qutebrowser is running and automatically
cleaned up at program exit.
Args:
suggested_name: str of the "suggested"/original filename. Used as a
                            suffix, so any file extensions are preserved.
Return:
A tempfile.NamedTemporaryFile that should be used to save the file.
"""
tmpdir = self._get_tmpdir()
encoding = sys.getfilesystemencoding()
suggested_name = utils.force_encoding(suggested_name, encoding)
# Make sure that the filename is not too long
suggested_name = utils.elide_filename(suggested_name, 50)
fobj = tempfile.NamedTemporaryFile(dir=tmpdir.name, delete=False,
suffix=suggested_name)
self.files.append(fobj)
return fobj
temp_download_manager = TempDownloadManager()
| 1 | 20,064 | This should be `re.search` with a `^` anchor added to the regex, as what we want here is really any path starting with something like `E:`. | qutebrowser-qutebrowser | py |
@@ -108,6 +108,14 @@ func determineResourceHealth(key ResourceKey, obj *unstructured.Unstructured) (s
return determineClusterRoleHealth(obj)
case KindClusterRoleBinding:
return determineClusterRoleBindingHealth(obj)
+ case KindVirtualService:
+ return determineVirtualService(obj)
+ case KindDestinationRule:
+ return determineDestinationRule(obj)
+ case KindGateway:
+ return determineGateway(obj)
+ case KindServiceEntry:
+ return determineServiceEntry(obj)
default:
desc = "Unimplemented or unknown resource"
return | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"fmt"
"sort"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes/scheme"
"github.com/pipe-cd/pipe/pkg/model"
)
func MakeKubernetesResourceState(uid string, key ResourceKey, obj *unstructured.Unstructured, now time.Time) model.KubernetesResourceState {
var (
owners = obj.GetOwnerReferences()
ownerIDs = make([]string, 0, len(owners))
creationTime = obj.GetCreationTimestamp()
status, desc = determineResourceHealth(key, obj)
)
for _, owner := range owners {
ownerIDs = append(ownerIDs, string(owner.UID))
}
sort.Strings(ownerIDs)
state := model.KubernetesResourceState{
Id: uid,
OwnerIds: ownerIDs,
// TODO: Think about adding more parents by using label selectors
ParentIds: ownerIDs,
Name: key.Name,
ApiVersion: key.APIVersion,
Kind: key.Kind,
Namespace: obj.GetNamespace(),
HealthStatus: status,
HealthDescription: desc,
CreatedAt: creationTime.Unix(),
UpdatedAt: now.Unix(),
}
return state
}
func determineResourceHealth(key ResourceKey, obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
if !IsKubernetesBuiltInResource(key.APIVersion) {
desc = fmt.Sprintf("\"%s/%s\" was applied successfully but its health status couldn't be determined exactly. (Because tracking status for this kind of resource is not supported yet.)", key.APIVersion, key.Kind)
return
}
switch key.Kind {
case KindDeployment:
return determineDeploymentHealth(obj)
case KindStatefulSet:
return determineStatefulSetHealth(obj)
case KindDaemonSet:
return determineDaemonSetHealth(obj)
case KindReplicaSet:
return determineReplicaSetHealth(obj)
case KindPod:
return determinePodHealth(obj)
case KindJob:
return determineJobHealth(obj)
case KindCronJob:
return determineCronJobHealth(obj)
case KindService:
return determineServiceHealth(obj)
case KindIngress:
return determineIngressHealth(obj)
case KindConfigMap:
return determineConfigMapHealth(obj)
case KindPersistentVolume:
return determinePersistentVolumeHealth(obj)
case KindPersistentVolumeClaim:
return determinePVCHealth(obj)
case KindSecret:
return determineSecretHealth(obj)
case KindServiceAccount:
return determineServiceAccountHealth(obj)
case KindRole:
return determineRoleHealth(obj)
case KindRoleBinding:
return determineRoleBindingHealth(obj)
case KindClusterRole:
return determineClusterRoleHealth(obj)
case KindClusterRoleBinding:
return determineClusterRoleBindingHealth(obj)
default:
desc = "Unimplemented or unknown resource"
return
}
return
}
func determineRoleHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineRoleBindingHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineClusterRoleHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineClusterRoleBindingHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
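
// A minimal sketch of the determineVirtualService helper referenced in the diff above; the name
// comes from the patch, but the body is an assumption modeled on the simple handlers such as
// determineRoleHealth, since this record does not include the new helpers. Istio resources expose
// no built-in readiness status, so the sketch only reports a successful apply. (The record's review
// comment also notes that IsKubernetesBuiltInResource must accept the Istio apiVersion before the
// new switch cases can be reached at all.)
func determineVirtualService(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
	desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
	status = model.KubernetesResourceState_HEALTHY
	return
}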
func determineDeploymentHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
d := &appsv1.Deployment{}
err := scheme.Scheme.Convert(obj, d, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, d, err)
return
}
status = model.KubernetesResourceState_OTHER
if d.Spec.Paused {
desc = "Deployment is paused"
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L75
if d.Generation > d.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed deployment generation less than desired generation"
return
}
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
const timedOutReason = "ProgressDeadlineExceeded"
var cond *appsv1.DeploymentCondition
for i := range d.Status.Conditions {
c := d.Status.Conditions[i]
if c.Type == appsv1.DeploymentProgressing {
cond = &c
break
}
}
if cond != nil && cond.Reason == timedOutReason {
desc = fmt.Sprintf("Deployment %q exceeded its progress deadline", obj.GetName())
}
if d.Spec.Replicas == nil {
desc = "The number of desired replicas is unspecified"
return
}
if d.Status.UpdatedReplicas < *d.Spec.Replicas {
desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be updated", d.Status.UpdatedReplicas, *d.Spec.Replicas)
return
}
if d.Status.UpdatedReplicas < d.Status.Replicas {
desc = fmt.Sprintf("%d old replicas are pending termination", d.Status.Replicas-d.Status.UpdatedReplicas)
return
}
if d.Status.AvailableReplicas < d.Status.Replicas {
desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be available", d.Status.Replicas-d.Status.AvailableReplicas, d.Status.Replicas)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineStatefulSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &appsv1.StatefulSet{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, s, err)
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L130-L149
status = model.KubernetesResourceState_OTHER
if s.Status.ObservedGeneration == 0 || s.Generation > s.Status.ObservedGeneration {
desc = "Waiting for statefulset spec update to be observed"
return
}
if s.Spec.Replicas == nil {
desc = "The number of desired replicas is unspecified"
return
}
if *s.Spec.Replicas != s.Status.ReadyReplicas {
desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", s.Status.ReadyReplicas, *s.Spec.Replicas)
return
}
// Check if the partitioned roll out is in progress.
if s.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && s.Spec.UpdateStrategy.RollingUpdate != nil {
if s.Spec.Replicas != nil && s.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
if s.Status.UpdatedReplicas < (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition) {
desc = fmt.Sprintf("Waiting for partitioned roll out to finish because %d out of %d new pods have been updated",
s.Status.UpdatedReplicas, (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition))
return
}
}
status = model.KubernetesResourceState_HEALTHY
return
}
if s.Status.UpdateRevision != s.Status.CurrentRevision {
desc = fmt.Sprintf("Waiting for statefulset rolling update to complete %d pods at revision %s", s.Status.UpdatedReplicas, s.Status.UpdateRevision)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineDaemonSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
d := &appsv1.DaemonSet{}
err := scheme.Scheme.Convert(obj, d, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, d, err)
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L107-L115
status = model.KubernetesResourceState_OTHER
if d.Status.ObservedGeneration == 0 || d.Generation > d.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed daemon set generation less than desired generation"
return
}
if d.Status.UpdatedNumberScheduled < d.Status.DesiredNumberScheduled {
desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d out of %d new pods have been updated", d.Name, d.Status.UpdatedNumberScheduled, d.Status.DesiredNumberScheduled)
return
}
if d.Status.NumberAvailable < d.Status.DesiredNumberScheduled {
desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d of %d updated pods are available", d.Name, d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)
return
}
if d.Status.NumberMisscheduled > 0 {
desc = fmt.Sprintf("%d nodes that are running the daemon pod, but are not supposed to run the daemon pod", d.Status.NumberMisscheduled)
return
}
if d.Status.NumberUnavailable > 0 {
desc = fmt.Sprintf("%d nodes that should be running the daemon pod and have none of the daemon pod running and available", d.Status.NumberUnavailable)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineReplicaSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
r := &appsv1.ReplicaSet{}
err := scheme.Scheme.Convert(obj, r, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, r, err)
return
}
status = model.KubernetesResourceState_OTHER
if r.Status.ObservedGeneration == 0 || r.Generation > r.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed replica set generation less than desired generation"
return
}
var cond *appsv1.ReplicaSetCondition
for i := range r.Status.Conditions {
c := r.Status.Conditions[i]
if c.Type == appsv1.ReplicaSetReplicaFailure {
cond = &c
break
}
}
switch {
case cond != nil && cond.Status == corev1.ConditionTrue:
desc = cond.Message
return
case r.Spec.Replicas == nil:
desc = "The number of desired replicas is unspecified"
return
case r.Status.AvailableReplicas < *r.Spec.Replicas:
desc = fmt.Sprintf("Waiting for rollout to finish because only %d/%d replicas are available", r.Status.AvailableReplicas, *r.Spec.Replicas)
return
case *r.Spec.Replicas != r.Status.ReadyReplicas:
desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", r.Status.ReadyReplicas, *r.Spec.Replicas)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineCronJobHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineJobHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
job := &batchv1.Job{}
err := scheme.Scheme.Convert(obj, job, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, job, err)
return
}
var (
failed bool
completed bool
message string
)
for _, condition := range job.Status.Conditions {
switch condition.Type {
case batchv1.JobFailed:
failed = true
completed = true
message = condition.Message
case batchv1.JobComplete:
completed = true
message = condition.Message
}
}
switch {
case !completed:
status = model.KubernetesResourceState_HEALTHY
desc = "Job is in progress"
case failed:
status = model.KubernetesResourceState_OTHER
desc = message
default:
status = model.KubernetesResourceState_HEALTHY
desc = message
}
return
}
func determinePodHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
p := &corev1.Pod{}
err := scheme.Scheme.Convert(obj, p, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, p, err)
return
}
// Determine based on its container statuses.
if p.Spec.RestartPolicy == corev1.RestartPolicyAlways {
var messages []string
for _, s := range p.Status.ContainerStatuses {
waiting := s.State.Waiting
if waiting == nil {
continue
}
if strings.HasPrefix(waiting.Reason, "Err") || strings.HasSuffix(waiting.Reason, "Error") || strings.HasSuffix(waiting.Reason, "BackOff") {
status = model.KubernetesResourceState_OTHER
messages = append(messages, waiting.Message)
}
}
if status == model.KubernetesResourceState_OTHER {
desc = strings.Join(messages, ", ")
return
}
}
// Determine based on its phase.
switch p.Status.Phase {
case corev1.PodRunning, corev1.PodSucceeded:
status = model.KubernetesResourceState_HEALTHY
desc = p.Status.Message
default:
status = model.KubernetesResourceState_OTHER
desc = p.Status.Message
}
return
}
func determineIngressHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
check := func(ingressList []corev1.LoadBalancerIngress) {
if len(ingressList) == 0 {
status = model.KubernetesResourceState_OTHER
desc = "Ingress points for the load-balancer are in progress"
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
v1Ingress := &networkingv1.Ingress{}
err := scheme.Scheme.Convert(obj, v1Ingress, nil)
if err == nil {
check(v1Ingress.Status.LoadBalancer.Ingress)
return
}
// PipeCD keeps supporting Kubernetes < v1.22 for the meantime so checks deprecated versions as well.
betaIngress := &networkingv1beta1.Ingress{}
err = scheme.Scheme.Convert(obj, betaIngress, nil)
if err == nil {
check(betaIngress.Status.LoadBalancer.Ingress)
return
}
extensionIngress := &extensionsv1beta1.Ingress{}
err = scheme.Scheme.Convert(obj, extensionIngress, nil)
if err == nil {
check(extensionIngress.Status.LoadBalancer.Ingress)
return
}
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to neither %T, %T, nor %T: %v", obj, v1Ingress, betaIngress, extensionIngress, err)
return
}
func determineServiceHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &corev1.Service{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, s, err)
return
}
status = model.KubernetesResourceState_HEALTHY
if s.Spec.Type != corev1.ServiceTypeLoadBalancer {
return
}
if len(s.Status.LoadBalancer.Ingress) == 0 {
status = model.KubernetesResourceState_OTHER
desc = "Ingress points for the load-balancer are in progress"
return
}
return
}
func determineConfigMapHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineSecretHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determinePersistentVolumeHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
pv := &corev1.PersistentVolume{}
err := scheme.Scheme.Convert(obj, pv, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, pv, err)
return
}
switch pv.Status.Phase {
case corev1.VolumeBound, corev1.VolumeAvailable:
status = model.KubernetesResourceState_HEALTHY
desc = pv.Status.Message
return
default:
status = model.KubernetesResourceState_OTHER
desc = pv.Status.Message
return
}
}
func determinePVCHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
pvc := &corev1.PersistentVolumeClaim{}
err := scheme.Scheme.Convert(obj, pvc, nil)
if err != nil {
status = model.KubernetesResourceState_OTHER
desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, pvc, err)
return
}
switch pvc.Status.Phase {
case corev1.ClaimLost:
status = model.KubernetesResourceState_OTHER
desc = "Lost its underlying PersistentVolume"
case corev1.ClaimPending:
status = model.KubernetesResourceState_OTHER
desc = "Being not yet bound"
case corev1.ClaimBound:
status = model.KubernetesResourceState_HEALTHY
default:
status = model.KubernetesResourceState_OTHER
desc = "The current phase of PersistentVolumeClaim is unexpected"
}
return
}
func determineServiceAccountHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
desc = fmt.Sprintf("%q was applied successfully", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
| 1 | 21,852 | Since `IsKubernetesBuiltInResource` at L69 returns false due to lack `networking.istio.io/v1alpha3` in `builtInApiVersions` within `pkg/app/piped/cloudprovider/kubernetes/resourcekey.go`, it will never reach this point. | pipe-cd-pipe | go |
@@ -40,7 +40,7 @@ import (
// themselves.
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
selector := simple.NewWithInexpensiveDistribution()
- proc := processor.New(selector, exportmetric.StatelessExportKindSelector())
+ proc := processor.NewFactory(selector, exportmetric.StatelessExportKindSelector())
cont := controller.New(proc, controller.WithExporter(exp))
require.NoError(t, cont.Start(ctx))
| 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest
import (
"context"
"fmt"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// RunEndToEndTest can be used by protocol driver tests to validate
// themselves.
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
selector := simple.NewWithInexpensiveDistribution()
proc := processor.New(selector, exportmetric.StatelessExportKindSelector())
cont := controller.New(proc, controller.WithExporter(exp))
require.NoError(t, cont.Start(ctx))
meter := cont.MeterProvider().Meter("test-meter")
labels := []attribute.KeyValue{attribute.Bool("test", true)}
type data struct {
iKind sdkapi.InstrumentKind
nKind number.Kind
val int64
}
instruments := map[string]data{
"test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1},
"test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1},
"test-int64-valuerecorder": {sdkapi.ValueRecorderInstrumentKind, number.Int64Kind, 2},
"test-float64-valuerecorder": {sdkapi.ValueRecorderInstrumentKind, number.Float64Kind, 2},
"test-int64-valueobserver": {sdkapi.ValueObserverInstrumentKind, number.Int64Kind, 3},
"test-float64-valueobserver": {sdkapi.ValueObserverInstrumentKind, number.Float64Kind, 3},
}
for name, data := range instruments {
data := data
switch data.iKind {
case sdkapi.CounterInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64Counter(name).Add(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64Counter(name).Add(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case sdkapi.ValueRecorderInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64ValueRecorder(name).Record(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64ValueRecorder(name).Record(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case sdkapi.ValueObserverInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64ValueObserver(name,
func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(data.val, labels...)
},
)
case number.Float64Kind:
callback := func(v float64) metric.Float64ObserverFunc {
return metric.Float64ObserverFunc(func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(v, labels...) })
}(float64(data.val))
metric.Must(meter).NewFloat64ValueObserver(name, callback)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
default:
assert.Failf(t, "unsupported metrics testing kind", data.iKind.String())
}
}
// Flush and close.
require.NoError(t, cont.Stop(ctx))
// Wait >2 cycles.
<-time.After(40 * time.Millisecond)
// Now shutdown the exporter
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf("failed to stop the exporter: %v", err)
}
// Shutdown the collector too so that we can begin
// verification checks of expected data back.
_ = mcMetrics.Stop()
metrics := mcMetrics.GetMetrics()
assert.Len(t, metrics, len(instruments), "not enough metrics exported")
seen := make(map[string]struct{}, len(instruments))
for _, m := range metrics {
data, ok := instruments[m.Name]
if !ok {
assert.Failf(t, "unknown metrics", m.Name)
continue
}
seen[m.Name] = struct{}{}
switch data.iKind {
case sdkapi.CounterInstrumentKind, sdkapi.ValueObserverInstrumentKind:
var dp []*metricpb.NumberDataPoint
switch data.iKind {
case sdkapi.CounterInstrumentKind:
require.NotNil(t, m.GetSum())
dp = m.GetSum().GetDataPoints()
case sdkapi.ValueObserverInstrumentKind:
require.NotNil(t, m.GetGauge())
dp = m.GetGauge().GetDataPoints()
}
if assert.Len(t, dp, 1) {
switch data.nKind {
case number.Int64Kind:
v := &metricpb.NumberDataPoint_AsInt{AsInt: data.val}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
case number.Float64Kind:
v := &metricpb.NumberDataPoint_AsDouble{AsDouble: float64(data.val)}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
}
}
case sdkapi.ValueRecorderInstrumentKind:
require.NotNil(t, m.GetSummary())
if dp := m.GetSummary().DataPoints; assert.Len(t, dp, 1) {
count := dp[0].Count
assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
assert.Equal(t, float64(data.val*int64(count)), dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
}
default:
assert.Failf(t, "invalid metrics kind", data.iKind.String())
}
}
for i := range instruments {
if _, ok := seen[i]; !ok {
assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i))
}
}
}
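
// A hedged sketch of how a protocol driver test might invoke RunEndToEndTest; the collector and
// exporter constructors named below are hypothetical stand-ins for whatever the concrete driver
// package provides (they are not APIs confirmed by this file), which is why the example is kept
// as a comment rather than compiled code.
//
//	func TestDriverEndToEnd(t *testing.T) {
//		ctx := context.Background()
//		coll := newMockCollector(t)       // hypothetical: must implement otlpmetrictest.Collector
//		exp := newDriverExporter(t, coll) // hypothetical: returns an *otlpmetric.Exporter wired to coll
//		otlpmetrictest.RunEndToEndTest(ctx, t, exp, coll)
//	}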
| 1 | 16,307 | Does codecov not run this test? Not sure how else it would not be covered. | open-telemetry-opentelemetry-go | go |
@@ -30,8 +30,9 @@ func main() {
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
- Use: "multisend [command]",
- Args: cobra.ExactArgs(1),
+ Use: "multisend 'JSON_DATA'",
+ Short: "multisend bytecode generator",
+ Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
output, err := multiSend(args)
if err == nil { | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"os"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/spf13/cobra"
"github.com/iotexproject/iotex-core/cli/ioctl/util"
)
func main() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "multisend [command]",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
output, err := multiSend(args)
if err == nil {
fmt.Println(output)
}
return err
},
}
var abiJSON = `[{"constant":false,"inputs":[{"name":"recipients","type":"address[]"},
{"name":"amounts","type":"uint256[]"}],"name":"multiSend","outputs":[],"payable":true,
"stateMutability":"payable","type":"function"},{"anonymous":false,
"inputs":[{"indexed":false,"name":"recipient","type":"address"},
{"indexed":false,"name":"amount","type":"uint256"}],"name":"Transfer",
"type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"refund",
"type":"uint256"}],"name":"Refund","type":"event"}]`
var abiFunc = "multiSend"
type targets struct {
Targets []target `json:"targets"`
Payload string `json:"payload"`
}
type target struct {
Recipient string `json:"recipient"`
Amount string `json:"amount"`
}
func multiSend(args []string) (string, error) {
var targetSet targets
if err := json.Unmarshal([]byte(args[0]), &targetSet); err != nil {
return "", err
}
recipients := make([]common.Address, 0)
amounts := make([]*big.Int, 0)
for _, target := range targetSet.Targets {
recipient, err := util.IoAddrToEvmAddr(target.Recipient)
if err != nil {
return "", err
}
recipients = append(recipients, recipient)
amount, ok := big.NewInt(0).SetString(target.Amount, 10)
if !ok {
return "", fmt.Errorf("failed to convert string to big int")
}
amounts = append(amounts, amount)
}
reader := strings.NewReader(abiJSON)
multisendABI, err := abi.JSON(reader)
if err != nil {
return "", err
}
bytecode, err := multisendABI.Pack(abiFunc, recipients, amounts)
if err != nil {
return "", err
}
return hex.EncodeToString(bytecode), nil
}
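
// A hedged sketch of the JSON_DATA layout the command expects, built from the targets/target
// structs above; the recipient addresses and amounts are made-up placeholders, not values taken
// from the project. Amounts are plain decimal strings, matching the base-10 SetString call in
// multiSend.
func exampleMultiSendInput() string {
	in := targets{
		Targets: []target{
			{Recipient: "io1exampleaddr0000000000000000000000000000", Amount: "1000000000000000000"},
			{Recipient: "io1exampleaddr1111111111111111111111111111", Amount: "2000000000000000000"},
		},
		Payload: "",
	}
	raw, _ := json.Marshal(in)
	// The resulting string is what would be passed as the single CLI argument, e.g.
	// multisend '{"targets":[...],"payload":""}'; multiSend then returns hex-encoded call data.
	return string(raw)
}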
| 1 | 17,122 | can we add some sample of JSON_DATA format in the usage? | iotexproject-iotex-core | go |
@@ -25,13 +25,13 @@ namespace Datadog.Trace.Logging
_logProvider = logProvider;
}
- public void Initialize(IScopeManager scopeManager, string defaultServiceName, string version, string env)
+ public void Initialize(string defaultServiceName, string version, string env)
{
_versionProperty = version;
_environmentProperty = env;
_serviceProperty = defaultServiceName;
- _traceIdProperty = CreateTracerProperty(scopeManager, t => t.Active?.Span.TraceId.ToString());
- _spanIdProperty = CreateTracerProperty(scopeManager, t => t.Active?.Span.SpanId.ToString());
+ _traceIdProperty = CreateTracerProperty(() => Tracer.Instance.DistributedSpanContext?[HttpHeaderNames.TraceId]);
+ _spanIdProperty = CreateTracerProperty(() => Tracer.Instance.DistributedSpanContext?[HttpHeaderNames.ParentId]);
}
public IDisposable Register() | 1 | // <copyright file="LogEnricher.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
namespace Datadog.Trace.Logging
{
/// <summary>
/// Represents the context needed for log injection for a given tracer
/// </summary>
internal class LogEnricher : ILogEnricher
{
private readonly ILogProvider _logProvider;
private object _versionProperty;
private object _environmentProperty;
private object _serviceProperty;
private object _traceIdProperty;
private object _spanIdProperty;
public LogEnricher(ILogProvider logProvider)
{
_logProvider = logProvider;
}
public void Initialize(IScopeManager scopeManager, string defaultServiceName, string version, string env)
{
_versionProperty = version;
_environmentProperty = env;
_serviceProperty = defaultServiceName;
_traceIdProperty = CreateTracerProperty(scopeManager, t => t.Active?.Span.TraceId.ToString());
_spanIdProperty = CreateTracerProperty(scopeManager, t => t.Active?.Span.SpanId.ToString());
}
public IDisposable Register()
{
return new Context(_logProvider, this);
}
protected virtual object CreateTracerProperty(IScopeManager scopeManager, Func<IScopeManager, string> getter) => new TracerProperty(scopeManager, getter);
/// <summary>
/// Wraps all the individual context objects in a single instance, that can be stored in an AsyncLocal
/// </summary>
private class Context : IDisposable
{
private readonly IDisposable _environment;
private readonly IDisposable _version;
private readonly IDisposable _service;
private readonly IDisposable _traceId;
private readonly IDisposable _spanId;
public Context(ILogProvider logProvider, LogEnricher enricher)
{
try
{
_environment = logProvider.OpenMappedContext(CorrelationIdentifier.EnvKey, enricher._environmentProperty);
_version = logProvider.OpenMappedContext(CorrelationIdentifier.VersionKey, enricher._versionProperty);
_service = logProvider.OpenMappedContext(CorrelationIdentifier.ServiceKey, enricher._serviceProperty);
_traceId = logProvider.OpenMappedContext(CorrelationIdentifier.TraceIdKey, enricher._traceIdProperty);
_spanId = logProvider.OpenMappedContext(CorrelationIdentifier.SpanIdKey, enricher._spanIdProperty);
}
catch
{
// Clear the properties that are already mapped
Dispose();
throw;
}
}
public void Dispose()
{
_environment?.Dispose();
_version?.Dispose();
_service?.Dispose();
_traceId?.Dispose();
_spanId?.Dispose();
}
}
private class TracerProperty
{
private readonly IScopeManager _scopeManager;
private readonly Func<IScopeManager, string> _getter;
public TracerProperty(IScopeManager scopeManager, Func<IScopeManager, string> getter)
{
_scopeManager = scopeManager;
_getter = getter;
}
public override string ToString()
{
return _getter(_scopeManager);
}
}
}
}
| 1 | 24,968 | Using `Tracer.Instance` in here is problematic for testing It will likely cause some other tests to break I think - that's why we started passing in `IScopeManager` EDIT: I see you used `[TracerRestore]` - maybe that'll be enough! | DataDog-dd-trace-dotnet | .cs |
@@ -22,7 +22,10 @@ import javax.annotation.Nullable;
*/
@AutoValue
public abstract class InitFieldConfig {
+ public static final String projectIdVariableName = "project_id";
+
private static final String randomValueToken = "$RANDOM";
+ private static final String projectIdToken = "$PROJECT_ID";
public abstract String fieldPath();
| 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.metacode;
import com.google.auto.value.AutoValue;
import javax.annotation.Nullable;
/*
 * A metadata class which stores the configuration data of an initialized field.
*/
@AutoValue
public abstract class InitFieldConfig {
private static final String randomValueToken = "$RANDOM";
public abstract String fieldPath();
@Nullable
public abstract String entityName();
@Nullable
public abstract String value();
/*
* Parses the given config string and returns the corresponding object.
*/
public static InitFieldConfig from(String initFieldConfigString) {
String fieldName = null;
String entityName = null;
String value = null;
String[] equalsParts = initFieldConfigString.split("[=]");
if (equalsParts.length > 2) {
throw new IllegalArgumentException("Inconsistent: found multiple '=' characters");
} else if (equalsParts.length == 2) {
value = parseValueString(equalsParts[1], equalsParts[0]);
}
String[] fieldSpecs = equalsParts[0].split("[%]");
fieldName = fieldSpecs[0];
if (fieldSpecs.length == 2) {
entityName = fieldSpecs[1];
} else if (fieldSpecs.length > 2) {
throw new IllegalArgumentException("Inconsistent: found multiple '%' characters");
}
return new AutoValue_InitFieldConfig(fieldName, entityName, value);
}
public boolean hasSimpleInitValue() {
return entityName() == null && value() != null;
}
public boolean isFormattedConfig() {
return entityName() != null;
}
public boolean hasFormattedInitValue() {
return entityName() != null && value() != null;
}
private static String parseValueString(String valueString, String stringToHash) {
if (valueString.contains(randomValueToken)) {
String randomValue = Integer.toString(Math.abs(stringToHash.hashCode()));
valueString = valueString.replace(randomValueToken, randomValue);
}
return valueString;
}
}
| 1 | 19,361 | make all of these `static final` fields UPPER_SNAKE - they are constants. | googleapis-gapic-generator | java |
@@ -3298,7 +3298,8 @@ int32 Mob::AffectMagicalDamage(int32 damage, uint16 spell_id, const bool iBuffTi
// If this is a DoT, use DoT Shielding...
if (iBuffTic) {
- damage -= (damage * itembonuses.DoTShielding / 100);
+ int total_dotshielding = itembonuses.DoTShielding + itembonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_PERCENT] + aabonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_PERCENT];
+ damage -= (damage * total_dotshielding / 100);
if (spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_PERCENT]) {
slot = spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_BUFFSLOT]; | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2002 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "../common/global_define.h"
#include "../common/eq_constants.h"
#include "../common/eq_packet_structs.h"
#include "../common/rulesys.h"
#include "../common/skills.h"
#include "../common/spdat.h"
#include "../common/string_util.h"
#include "../common/data_verification.h"
#include "../common/misc_functions.h"
#include "queryserv.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "water_map.h"
#include "worldserver.h"
#include "zone.h"
#include "lua_parser.h"
#include "fastmath.h"
#include "mob.h"
#include "npc.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <boost/concept_check.hpp>
#ifdef BOTS
#include "bot.h"
#endif
extern QueryServ* QServ;
extern WorldServer worldserver;
extern FastMath g_Math;
#ifdef _WINDOWS
#define snprintf _snprintf
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#endif
extern EntityList entity_list;
extern Zone* zone;
//SYNC WITH: tune.cpp, mob.h TuneAttackAnimation
EQ::skills::SkillType Mob::AttackAnimation(int Hand, const EQ::ItemInstance* weapon, EQ::skills::SkillType skillinuse)
{
// Determine animation
int type = 0;
if (weapon && weapon->IsClassCommon()) {
const EQ::ItemData* item = weapon->GetItem();
Log(Logs::Detail, Logs::Attack, "Weapon skill : %i", item->ItemType);
switch (item->ItemType) {
case EQ::item::ItemType1HSlash: // 1H Slashing
skillinuse = EQ::skills::Skill1HSlashing;
type = anim1HWeapon;
break;
case EQ::item::ItemType2HSlash: // 2H Slashing
skillinuse = EQ::skills::Skill2HSlashing;
type = anim2HSlashing;
break;
case EQ::item::ItemType1HPiercing: // Piercing
skillinuse = EQ::skills::Skill1HPiercing;
type = anim1HPiercing;
break;
case EQ::item::ItemType1HBlunt: // 1H Blunt
skillinuse = EQ::skills::Skill1HBlunt;
type = anim1HWeapon;
break;
case EQ::item::ItemType2HBlunt: // 2H Blunt
skillinuse = EQ::skills::Skill2HBlunt;
type = RuleB(Combat, Classic2HBAnimation) ? anim2HWeapon : anim2HSlashing;
break;
case EQ::item::ItemType2HPiercing: // 2H Piercing
if (IsClient() && CastToClient()->ClientVersion() < EQ::versions::ClientVersion::RoF2)
skillinuse = EQ::skills::Skill1HPiercing;
else
skillinuse = EQ::skills::Skill2HPiercing;
type = anim2HWeapon;
break;
case EQ::item::ItemTypeMartial:
skillinuse = EQ::skills::SkillHandtoHand;
type = animHand2Hand;
break;
default:
skillinuse = EQ::skills::SkillHandtoHand;
type = animHand2Hand;
break;
}// switch
}
else if (IsNPC()) {
switch (skillinuse) {
case EQ::skills::Skill1HSlashing: // 1H Slashing
type = anim1HWeapon;
break;
case EQ::skills::Skill2HSlashing: // 2H Slashing
type = anim2HSlashing;
break;
case EQ::skills::Skill1HPiercing: // Piercing
type = anim1HPiercing;
break;
case EQ::skills::Skill1HBlunt: // 1H Blunt
type = anim1HWeapon;
break;
case EQ::skills::Skill2HBlunt: // 2H Blunt
type = anim2HSlashing; //anim2HWeapon
break;
case EQ::skills::Skill2HPiercing: // 2H Piercing
type = anim2HWeapon;
break;
case EQ::skills::SkillHandtoHand:
type = animHand2Hand;
break;
default:
type = animHand2Hand;
break;
}// switch
}
else {
skillinuse = EQ::skills::SkillHandtoHand;
type = animHand2Hand;
}
// If we're attacking with the secondary hand, play the dual wield anim
if (Hand == EQ::invslot::slotSecondary) {// DW anim
type = animDualWield;
//allow animation chance to fire to be similar to your dw chance
if (GetDualWieldingSameDelayWeapons() == 2) {
SetDualWieldingSameDelayWeapons(3);
}
}
//If both weapons have same delay this allows a chance for DW animation
if (GetDualWieldingSameDelayWeapons() && Hand == EQ::invslot::slotPrimary) {
if (GetDualWieldingSameDelayWeapons() == 3 && zone->random.Roll(50)) {
type = animDualWield;
SetDualWieldingSameDelayWeapons(2);//Don't roll again till you do another dw attack.
}
SetDualWieldingSameDelayWeapons(2);//Ensures first attack is always primary.
}
DoAnim(type, 0, false);
return skillinuse;
}
//SYNC WITH: tune.cpp, mob.h Tunecompute_tohit
int Mob::compute_tohit(EQ::skills::SkillType skillinuse)
{
int tohit = GetSkill(EQ::skills::SkillOffense) + 7;
tohit += GetSkill(skillinuse);
if (IsNPC())
tohit += CastToNPC()->GetAccuracyRating();
if (IsClient()) {
double reduction = CastToClient()->m_pp.intoxication / 2.0;
if (reduction > 20.0) {
reduction = std::min((110 - reduction) / 100.0, 1.0);
tohit = reduction * static_cast<double>(tohit);
}
else if (IsBerserk()) {
tohit += (GetLevel() * 2) / 5;
}
}
return std::max(tohit, 1);
}
// return -1 in cases that always hit
//SYNC WITH: tune.cpp, mob.h TuneGetTotalToHit
int Mob::GetTotalToHit(EQ::skills::SkillType skill, int chance_mod)
{
if (chance_mod >= 10000) // override for stuff like SE_SkillAttack
return -1;
// calculate attacker's accuracy
auto accuracy = compute_tohit(skill) + 10; // add 10 in case the NPC's stats are fucked
if (chance_mod > 0) // multiplier
accuracy *= chance_mod;
// Torven parsed an apparent constant of 1.2 somewhere in here * 6 / 5 looks eqmathy to me!
// new test clients have 121 / 100
accuracy = (accuracy * 121) / 100;
// unsure on the stacking order of these effects, rather hard to parse
// item mod2 accuracy isn't applied to range? Theory crafting and parses back it up I guess
// mod2 accuracy -- flat bonus
if (skill != EQ::skills::SkillArchery && skill != EQ::skills::SkillThrowing)
accuracy += itembonuses.HitChance;
//518 Increase ATK accuracy by percentage, stackable
auto atkhit_bonus = itembonuses.Attack_Accuracy_Max_Percent + aabonuses.Attack_Accuracy_Max_Percent + spellbonuses.Attack_Accuracy_Max_Percent;
if (atkhit_bonus)
accuracy += round(static_cast<double>(accuracy) * static_cast<double>(atkhit_bonus) * 0.0001);
// 216 Melee Accuracy Amt aka SE_Accuracy -- flat bonus
accuracy += itembonuses.Accuracy[EQ::skills::HIGHEST_SKILL + 1] +
aabonuses.Accuracy[EQ::skills::HIGHEST_SKILL + 1] +
spellbonuses.Accuracy[EQ::skills::HIGHEST_SKILL + 1] +
itembonuses.Accuracy[skill] +
aabonuses.Accuracy[skill] +
spellbonuses.Accuracy[skill];
// auto hit discs (and looks like there are some autohit AAs)
if (spellbonuses.HitChanceEffect[skill] >= 10000 || aabonuses.HitChanceEffect[skill] >= 10000)
return -1;
if (spellbonuses.HitChanceEffect[EQ::skills::HIGHEST_SKILL + 1] >= 10000)
return -1;
// 184 Accuracy % aka SE_HitChance -- percentage increase
auto hit_bonus = itembonuses.HitChanceEffect[EQ::skills::HIGHEST_SKILL + 1] +
aabonuses.HitChanceEffect[EQ::skills::HIGHEST_SKILL + 1] +
spellbonuses.HitChanceEffect[EQ::skills::HIGHEST_SKILL + 1] +
itembonuses.HitChanceEffect[skill] +
aabonuses.HitChanceEffect[skill] +
spellbonuses.HitChanceEffect[skill];
accuracy = (accuracy * (100 + hit_bonus)) / 100;
// TODO: April 2003 added an archery/throwing PVP accuracy penalty while moving, should be in here some where,
// but PVP is less important so I haven't tried parsing it at all
// There is also 110 Ranger Archery Accuracy % which should probably be in here some where, but it's not in any spells/aas
// Name implies it's a percentage increase, if one wishes to implement, do it like the hit_bonus above but limited to ranger archery
// There is also 183 UNUSED - Skill Increase Chance which devs say isn't used at all in code, but some spells reference it
// I do not recommend implementing this once since there are spells that use it which would make this not live-like with default spell files
return accuracy;
}
// based on dev quotes
// the AGI bonus has actually drastically changed from classic
//SYNC WITH: tune.cpp, mob.h Tunecompute_defense
int Mob::compute_defense()
{
int defense = GetSkill(EQ::skills::SkillDefense) * 400 / 225;
defense += (8000 * (GetAGI() - 40)) / 36000;
if (IsClient())
defense += CastToClient()->GetHeroicAGI() / 10;
//516 SE_AC_Mitigation_Max_Percent
auto ac_bonus = itembonuses.AC_Mitigation_Max_Percent + aabonuses.AC_Mitigation_Max_Percent + spellbonuses.AC_Mitigation_Max_Percent;
if (ac_bonus)
defense += round(static_cast<double>(defense) * static_cast<double>(ac_bonus) * 0.0001);
defense += itembonuses.AvoidMeleeChance; // item mod2
if (IsNPC())
defense += CastToNPC()->GetAvoidanceRating();
if (IsClient()) {
double reduction = CastToClient()->m_pp.intoxication / 2.0;
if (reduction > 20.0) {
reduction = std::min((110 - reduction) / 100.0, 1.0);
defense = reduction * static_cast<double>(defense);
}
}
return std::max(1, defense);
}
// return -1 in cases that always miss
// SYNC WITH : tune.cpp, mob.h TuneGetTotalDefense()
int Mob::GetTotalDefense()
{
auto avoidance = compute_defense() + 10; // add 10 in case the NPC's stats are fucked
auto evasion_bonus = spellbonuses.AvoidMeleeChanceEffect; // we check this first since it has a special case
if (evasion_bonus >= 10000)
return -1;
// 515 SE_AC_Avoidance_Max_Percent
	auto ac_avoidance_bonus = itembonuses.AC_Avoidance_Max_Percent + aabonuses.AC_Avoidance_Max_Percent + spellbonuses.AC_Avoidance_Max_Percent;
	if (ac_avoidance_bonus)
		avoidance += round(static_cast<double>(avoidance) * static_cast<double>(ac_avoidance_bonus) * 0.0001);
// 172 Evasion aka SE_AvoidMeleeChance
evasion_bonus += itembonuses.AvoidMeleeChanceEffect + aabonuses.AvoidMeleeChanceEffect; // item bonus here isn't mod2 avoidance
// 215 Pet Avoidance % aka SE_PetAvoidance
evasion_bonus += GetPetAvoidanceBonusFromOwner();
// Evasion is a percentage bonus according to AA descriptions
if (evasion_bonus)
avoidance = (avoidance * (100 + evasion_bonus)) / 100;
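// Illustrative example (made-up numbers): a +10% total evasion bonus turns 470 avoidance into (470 * 110) / 100 = 517.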
return avoidance;
}
// called when a mob is attacked, does the checks to see if it's a hit
// and does other mitigation checks. 'this' is the mob being attacked.
// SYNC WITH : tune.cpp, mob.h TuneCheckHitChance()
bool Mob::CheckHitChance(Mob* other, DamageHitInfo &hit)
{
#ifdef LUA_EQEMU
bool lua_ret = false;
bool ignoreDefault = false;
lua_ret = LuaParser::Instance()->CheckHitChance(this, other, hit, ignoreDefault);
if(ignoreDefault) {
return lua_ret;
}
#endif
Mob *attacker = other;
Mob *defender = this;
Log(Logs::Detail, Logs::Attack, "CheckHitChance(%s) attacked by %s", defender->GetName(), attacker->GetName());
if (defender->IsClient() && defender->CastToClient()->IsSitting())
return true;
auto avoidance = defender->GetTotalDefense();
if (avoidance == -1) // some sort of auto avoid disc
return false;
auto accuracy = hit.tohit;
if (accuracy == -1)
return true;
// so now we roll!
// relevant dev quote:
// Then your chance to simply avoid the attack is checked (defender's avoidance roll beat the attacker's accuracy roll.)
int tohit_roll = zone->random.Roll0(accuracy);
int avoid_roll = zone->random.Roll0(avoidance);
Log(Logs::Detail, Logs::Attack, "CheckHitChance accuracy(%d => %d) avoidance(%d => %d)", accuracy, tohit_roll, avoidance, avoid_roll);
// tie breaker? Don't want to be biased any one way
if (tohit_roll == avoid_roll)
return zone->random.Roll(50);
return tohit_roll > avoid_roll;
}
bool Mob::AvoidDamage(Mob *other, DamageHitInfo &hit)
{
#ifdef LUA_EQEMU
bool lua_ret = false;
bool ignoreDefault = false;
lua_ret = LuaParser::Instance()->AvoidDamage(this, other, hit, ignoreDefault);
if (ignoreDefault) {
return lua_ret;
}
#endif
/* called when a mob is attacked, does the checks to see if it's a hit
* and does other mitigation checks. 'this' is the mob being attacked.
*
* special return values:
* -1 - block
* -2 - parry
* -3 - riposte
* -4 - dodge
*
*/
/* Order according to current (SoF+?) dev quotes:
* https://forums.daybreakgames.com/eq/index.php?threads/test-update-06-10-15.223510/page-2#post-3261772
* https://forums.daybreakgames.com/eq/index.php?threads/test-update-06-10-15.223510/page-2#post-3268227
* Riposte 50, hDEX, must have weapon/fists, doesn't work on archery/throwing
* Block 25, hDEX, works on archery/throwing, behind block done here if back to attacker base1 is chance
* Parry 45, hDEX, doesn't work on throwing/archery, must be facing target
* Dodge 45, hAGI, works on archery/throwing, monks can dodge attacks from behind
* Shield Block, rand base1
* Staff Block, rand base1
* regular strike through
* avoiding the attack (CheckHitChance)
* As soon as one succeeds, none of the rest are checked
*
* Formula (all int math)
* (posted for parry, assume rest at the same)
* Chance = (((SKILL + 100) + [((SKILL+100) * SPA(175).Base1) / 100]) / 45) + [(hDex / 25) - min([hDex / 25], hStrikethrough)].
* hStrikethrough is a mob stat that was added to counter the bonuses of heroic stats
* Number rolled against 100, if the chance is greater than 100 it happens 100% of time
*
* Things with 10k accuracy mods can be avoided with these skills qq
*/
Mob *attacker = other;
Mob *defender = this;
bool InFront = !attacker->BehindMob(this, attacker->GetX(), attacker->GetY());
/*
This special ability adds a negative modifier to the defender's riposte/block/parry/dodge chance,
therefore reducing the defender's chance to successfully avoid the melee attack. At present
this is the only way to fine-tune countering these mods on players. This may
ultimately end up being more useful as fields in npc_types.
*/
int counter_all = 0;
int counter_riposte = 0;
int counter_block = 0;
int counter_parry = 0;
int counter_dodge = 0;
if (attacker->GetSpecialAbility(COUNTER_AVOID_DAMAGE)) {
counter_all = attacker->GetSpecialAbilityParam(COUNTER_AVOID_DAMAGE, 0);
counter_riposte = attacker->GetSpecialAbilityParam(COUNTER_AVOID_DAMAGE, 1);
counter_block = attacker->GetSpecialAbilityParam(COUNTER_AVOID_DAMAGE, 2);
counter_parry = attacker->GetSpecialAbilityParam(COUNTER_AVOID_DAMAGE, 3);
counter_dodge = attacker->GetSpecialAbilityParam(COUNTER_AVOID_DAMAGE, 4);
}
int modify_all = 0;
int modify_riposte = 0;
int modify_block = 0;
int modify_parry = 0;
int modify_dodge = 0;
if (GetSpecialAbility(MODIFY_AVOID_DAMAGE)) {
modify_all = GetSpecialAbilityParam(MODIFY_AVOID_DAMAGE, 0);
modify_riposte = GetSpecialAbilityParam(MODIFY_AVOID_DAMAGE, 1);
modify_block = GetSpecialAbilityParam(MODIFY_AVOID_DAMAGE, 2);
modify_parry = GetSpecialAbilityParam(MODIFY_AVOID_DAMAGE, 3);
modify_dodge = GetSpecialAbilityParam(MODIFY_AVOID_DAMAGE, 4);
}
// riposte -- it may seem crazy, but if the attacker has SPA 173 on them, they are immune to Ripo
bool ImmuneRipo = attacker->aabonuses.RiposteChance || attacker->spellbonuses.RiposteChance || attacker->itembonuses.RiposteChance || attacker->IsEnraged();
// Need to check if we have something in MainHand to actually attack with (or fists)
if (hit.hand != EQ::invslot::slotRange && (CanThisClassRiposte() || IsEnraged()) && InFront && !ImmuneRipo) {
if (IsEnraged()) {
hit.damage_done = DMG_RIPOSTED;
LogCombat("I am enraged, riposting frontal attack");
return true;
}
if (IsClient())
CastToClient()->CheckIncreaseSkill(EQ::skills::SkillRiposte, other, -10);
// check auto discs ... I guess aa/items too :P
if (spellbonuses.RiposteChance == 10000 || aabonuses.RiposteChance == 10000 || itembonuses.RiposteChance == 10000) {
hit.damage_done = DMG_RIPOSTED;
return true;
}
int chance = GetSkill(EQ::skills::SkillRiposte) + 100;
chance += (chance * (aabonuses.RiposteChance + spellbonuses.RiposteChance + itembonuses.RiposteChance)) / 100;
chance /= 50;
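// Illustrative example (made-up numbers): 350 riposte skill with no percent bonuses gives (350 + 100) / 50 = 9,
// i.e. roughly a 9% chance before heroic DEX and the counter/modify adjustments below.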
chance += itembonuses.HeroicDEX / 25; // live has "heroic strikethrough" here to counter
if (counter_riposte || counter_all) {
float counter = (counter_riposte + counter_all) / 100.0f;
chance -= chance * counter;
}
if (modify_riposte || modify_all) {
float npc_modifier = (modify_riposte + modify_all) / 100.0f;
chance += chance * npc_modifier;
}
// AA Slippery Attacks
if (hit.hand == EQ::invslot::slotSecondary) {
int slip = aabonuses.OffhandRiposteFail + itembonuses.OffhandRiposteFail + spellbonuses.OffhandRiposteFail;
chance += chance * slip / 100;
}
if (chance > 0 && zone->random.Roll(chance)) { // could be <0 from offhand stuff
hit.damage_done = DMG_RIPOSTED;
return true;
}
}
// block
bool bBlockFromRear = false;
// a successful roll on this does not mean a successful block is forthcoming, only that a chance to block
// from a direction other than the rear is granted.
int BlockBehindChance = aabonuses.BlockBehind + spellbonuses.BlockBehind + itembonuses.BlockBehind;
if (BlockBehindChance && zone->random.Roll(BlockBehindChance))
bBlockFromRear = true;
if (CanThisClassBlock() && (InFront || bBlockFromRear)) {
if (IsClient())
CastToClient()->CheckIncreaseSkill(EQ::skills::SkillBlock, other, -10);
// check auto discs ... I guess aa/items too :P
if (spellbonuses.IncreaseBlockChance == 10000 || aabonuses.IncreaseBlockChance == 10000 ||
itembonuses.IncreaseBlockChance == 10000) {
hit.damage_done = DMG_BLOCKED;
return true;
}
int chance = GetSkill(EQ::skills::SkillBlock) + 100;
chance += (chance * (aabonuses.IncreaseBlockChance + spellbonuses.IncreaseBlockChance + itembonuses.IncreaseBlockChance)) / 100;
chance /= 25;
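// Illustrative example (made-up numbers): 250 block skill with no percent bonuses gives (250 + 100) / 25 = 14, roughly a 14% chance.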
chance += itembonuses.HeroicDEX / 25; // live has "heroic strikethrough" here to counter
if (counter_block || counter_all) {
float counter = (counter_block + counter_all) / 100.0f;
chance -= chance * counter;
}
if (modify_block || modify_all) {
float npc_modifier = (modify_block + modify_all) / 100.0f;
chance += chance * npc_modifier;
}
if (zone->random.Roll(chance)) {
hit.damage_done = DMG_BLOCKED;
return true;
}
}
// parry
if (CanThisClassParry() && InFront && hit.hand != EQ::invslot::slotRange) {
if (IsClient())
CastToClient()->CheckIncreaseSkill(EQ::skills::SkillParry, other, -10);
// check auto discs ... I guess aa/items too :P
if (spellbonuses.ParryChance == 10000 || aabonuses.ParryChance == 10000 || itembonuses.ParryChance == 10000) {
hit.damage_done = DMG_PARRIED;
return true;
}
int chance = GetSkill(EQ::skills::SkillParry) + 100;
chance += (chance * (aabonuses.ParryChance + spellbonuses.ParryChance + itembonuses.ParryChance)) / 100;
chance /= 45;
chance += itembonuses.HeroicDEX / 25; // live has "heroic strikethrough" here to counter
if (counter_parry || counter_all) {
float counter = (counter_parry + counter_all) / 100.0f;
chance -= chance * counter;
}
if (modify_parry || modify_all) {
float npc_modifier = (modify_parry + modify_all) / 100.0f;
chance += chance * npc_modifier;
}
if (zone->random.Roll(chance)) {
hit.damage_done = DMG_PARRIED;
return true;
}
}
// dodge
if (CanThisClassDodge() && (InFront || GetClass() == MONK)) {
if (IsClient())
CastToClient()->CheckIncreaseSkill(EQ::skills::SkillDodge, other, -10);
// check auto discs ... I guess aa/items too :P
if (spellbonuses.DodgeChance == 10000 || aabonuses.DodgeChance == 10000 || itembonuses.DodgeChance == 10000) {
hit.damage_done = DMG_DODGED;
return true;
}
int chance = GetSkill(EQ::skills::SkillDodge) + 100;
chance += (chance * (aabonuses.DodgeChance + spellbonuses.DodgeChance + itembonuses.DodgeChance)) / 100;
chance /= 45;
chance += itembonuses.HeroicAGI / 25; // live has "heroic strikethrough" here to counter
if (counter_dodge || counter_all) {
float counter = (counter_dodge + counter_all) / 100.0f;
chance -= chance * counter;
}
if (modify_dodge || modify_all) {
float npc_modifier = (modify_dodge + modify_all) / 100.0f;
chance += chance * npc_modifier;
}
if (zone->random.Roll(chance)) {
hit.damage_done = DMG_DODGED;
return true;
}
}
// Try Shield Block OR TwoHandBluntBlockCheck
if (HasShieldEquiped() && (aabonuses.ShieldBlock || spellbonuses.ShieldBlock || itembonuses.ShieldBlock) && (InFront || bBlockFromRear)) {
int chance = aabonuses.ShieldBlock + spellbonuses.ShieldBlock + itembonuses.ShieldBlock;
if (counter_block || counter_all) {
float counter = (counter_block + counter_all) / 100.0f;
chance -= chance * counter;
}
if (zone->random.Roll(chance)) {
hit.damage_done = DMG_BLOCKED;
return true;
}
}
if (HasTwoHandBluntEquiped() && (aabonuses.TwoHandBluntBlock || spellbonuses.TwoHandBluntBlock || itembonuses.TwoHandBluntBlock) && (InFront || bBlockFromRear)) {
int chance = aabonuses.TwoHandBluntBlock + itembonuses.TwoHandBluntBlock + spellbonuses.TwoHandBluntBlock;
if (counter_block || counter_all) {
float counter = (counter_block + counter_all) / 100.0f;
chance -= chance * counter;
}
if (zone->random.Roll(chance)) {
hit.damage_done = DMG_BLOCKED;
return true;
}
}
return false;
}
int Mob::GetACSoftcap()
{
// from test server Resources/ACMitigation.txt
static int war_softcaps[] = {
312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352,
354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394,
396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436,
438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478,
480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520
};
static int clrbrdmnk_softcaps[] = {
274, 276, 278, 278, 280, 282, 284, 286, 288, 290, 292, 292, 294, 296, 298, 300, 302, 304, 306, 308, 308,
310, 312, 314, 316, 318, 320, 322, 322, 324, 326, 328, 330, 332, 334, 336, 336, 338, 340, 342, 344, 346,
348, 350, 352, 352, 354, 356, 358, 360, 362, 364, 366, 366, 368, 370, 372, 374, 376, 378, 380, 380, 382,
384, 386, 388, 390, 392, 394, 396, 396, 398, 400, 402, 404, 406, 408, 410, 410, 412, 414, 416, 418, 420,
422, 424, 424, 426, 428, 430, 432, 434, 436, 438, 440, 440, 442, 444, 446, 448, 450, 452, 454, 454, 456
};
static int palshd_softcaps[] = {
298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 336,
338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378,
380, 382, 384, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418,
420, 422, 424, 426, 428, 430, 432, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458,
460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498
};
static int rng_softcaps[] = {
286, 288, 290, 292, 294, 296, 298, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 322,
324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362,
364, 366, 368, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 390, 392, 394, 396, 398, 400,
402, 404, 406, 408, 410, 412, 414, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 436, 438,
440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478
};
static int dru_softcaps[] = {
254, 256, 258, 260, 262, 264, 264, 266, 268, 270, 272, 272, 274, 276, 278, 280, 282, 282, 284, 286, 288,
290, 290, 292, 294, 296, 298, 300, 300, 302, 304, 306, 308, 308, 310, 312, 314, 316, 318, 318, 320, 322,
324, 326, 328, 328, 330, 332, 334, 336, 336, 338, 340, 342, 344, 346, 346, 348, 350, 352, 354, 354, 356,
358, 360, 362, 364, 364, 366, 368, 370, 372, 372, 374, 376, 378, 380, 382, 382, 384, 386, 388, 390, 390,
392, 394, 396, 398, 400, 400, 402, 404, 406, 408, 410, 410, 412, 414, 416, 418, 418, 420, 422, 424, 426
};
static int rogshmbstber_softcaps[] = {
264, 266, 268, 270, 272, 272, 274, 276, 278, 280, 282, 282, 284, 286, 288, 290, 292, 294, 294, 296, 298,
300, 302, 304, 306, 306, 308, 310, 312, 314, 316, 316, 318, 320, 322, 324, 326, 328, 328, 330, 332, 334,
336, 338, 340, 340, 342, 344, 346, 348, 350, 350, 352, 354, 356, 358, 360, 362, 362, 364, 366, 368, 370,
372, 374, 374, 376, 378, 380, 382, 384, 384, 386, 388, 390, 392, 394, 396, 396, 398, 400, 402, 404, 406,
408, 408, 410, 412, 414, 416, 418, 418, 420, 422, 424, 426, 428, 430, 430, 432, 434, 436, 438, 440, 442
};
static int necwizmagenc_softcaps[] = {
248, 250, 252, 254, 256, 256, 258, 260, 262, 264, 264, 266, 268, 270, 272, 272, 274, 276, 278, 280, 280,
282, 284, 286, 288, 288, 290, 292, 294, 296, 296, 298, 300, 302, 304, 304, 306, 308, 310, 312, 312, 314,
316, 318, 320, 320, 322, 324, 326, 328, 328, 330, 332, 334, 336, 336, 338, 340, 342, 344, 344, 346, 348,
350, 352, 352, 354, 356, 358, 360, 360, 362, 364, 366, 368, 368, 370, 372, 374, 376, 376, 378, 380, 382,
384, 384, 386, 388, 390, 392, 392, 394, 396, 398, 400, 400, 402, 404, 406, 408, 408, 410, 412, 414, 416
};
int level = std::min(105, static_cast<int>(GetLevel())) - 1;
switch (GetClass()) {
case WARRIOR:
return war_softcaps[level];
case CLERIC:
case BARD:
case MONK:
return clrbrdmnk_softcaps[level];
case PALADIN:
case SHADOWKNIGHT:
return palshd_softcaps[level];
case RANGER:
return rng_softcaps[level];
case DRUID:
return dru_softcaps[level];
case ROGUE:
case SHAMAN:
case BEASTLORD:
case BERSERKER:
return rogshmbstber_softcaps[level];
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
return necwizmagenc_softcaps[level];
default:
return 350;
}
}
double Mob::GetSoftcapReturns()
{
// These are based on the dev post, they seem to be correct for every level
// AKA no more hard caps
switch (GetClass()) {
case WARRIOR:
return 0.35;
case CLERIC:
case BARD:
case MONK:
return 0.3;
case PALADIN:
case SHADOWKNIGHT:
return 0.33;
case RANGER:
return 0.315;
case DRUID:
return 0.265;
case ROGUE:
case SHAMAN:
case BEASTLORD:
case BERSERKER:
return 0.28;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
return 0.25;
default:
return 0.3;
}
}
int Mob::GetClassRaceACBonus()
{
int ac_bonus = 0;
auto level = GetLevel();
if (GetClass() == MONK) {
int hardcap = 30;
int softcap = 14;
if (level > 99) {
hardcap = 58;
softcap = 35;
}
else if (level > 94) {
hardcap = 57;
softcap = 34;
}
else if (level > 89) {
hardcap = 56;
softcap = 33;
}
else if (level > 84) {
hardcap = 55;
softcap = 32;
}
else if (level > 79) {
hardcap = 54;
softcap = 31;
}
else if (level > 74) {
hardcap = 53;
softcap = 30;
}
else if (level > 69) {
hardcap = 53;
softcap = 28;
}
else if (level > 64) {
hardcap = 53;
softcap = 26;
}
else if (level > 63) {
hardcap = 50;
softcap = 24;
}
else if (level > 61) {
hardcap = 47;
softcap = 24;
}
else if (level > 59) {
hardcap = 45;
softcap = 24;
}
else if (level > 54) {
hardcap = 40;
softcap = 20;
}
else if (level > 50) {
hardcap = 38;
softcap = 18;
}
else if (level > 44) {
hardcap = 36;
softcap = 17;
}
else if (level > 29) {
hardcap = 34;
softcap = 16;
}
else if (level > 14) {
hardcap = 32;
softcap = 15;
}
int weight = IsClient() ? CastToClient()->CalcCurrentWeight()/10 : 0;
if (weight < hardcap - 1) {
double temp = level + 5;
if (weight > softcap) {
double redux = static_cast<double>(weight - softcap) * 6.66667;
redux = (100.0 - std::min(100.0, redux)) * 0.01;
temp = std::max(0.0, temp * redux);
}
ac_bonus = static_cast<int>((4.0 * temp) / 3.0);
}
else if (weight > hardcap + 1) {
double temp = level + 5;
double multiplier = std::min(1.0, (weight - (static_cast<double>(hardcap) - 10.0)) / 100.0);
temp = (4.0 * temp) / 3.0;
ac_bonus -= static_cast<int>(temp * multiplier);
}
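// Illustrative example (made-up numbers, level 60 monk: softcap 24 / hardcap 45): a weight value of 20 gives (4 * 65) / 3 = 86 bonus AC,
// a weight value of 30 reduces that to about 51, and weights well past the hardcap turn the bonus into a penalty instead.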
}
if (GetClass() == ROGUE) {
int level_scaler = level - 26;
if (GetAGI() < 80)
ac_bonus = level_scaler / 4;
else if (GetAGI() < 85)
ac_bonus = (level_scaler * 2) / 4;
else if (GetAGI() < 90)
ac_bonus = (level_scaler * 3) / 4;
else if (GetAGI() < 100)
ac_bonus = (level_scaler * 4) / 4;
else if (GetAGI() >= 100)
ac_bonus = (level_scaler * 5) / 4;
if (ac_bonus > 12)
ac_bonus = 12;
}
if (GetClass() == BEASTLORD) {
int level_scaler = level - 6;
if (GetAGI() < 80)
ac_bonus = level_scaler / 5;
else if (GetAGI() < 85)
ac_bonus = (level_scaler * 2) / 5;
else if (GetAGI() < 90)
ac_bonus = (level_scaler * 3) / 5;
else if (GetAGI() < 100)
ac_bonus = (level_scaler * 4) / 5;
else if (GetAGI() >= 100)
ac_bonus = (level_scaler * 5) / 5;
if (ac_bonus > 16)
ac_bonus = 16;
}
if (GetRace() == IKSAR)
ac_bonus += EQ::Clamp(static_cast<int>(level), 10, 35);
return ac_bonus;
}
//SYNC WITH: tune.cpp, mob.h TuneACSum
int Mob::ACSum(bool skip_caps)
{
int ac = 0; // this should be base AC whenever shrouds come around
ac += itembonuses.AC; // items + food + tribute
int shield_ac = 0;
if (HasShieldEquiped() && IsClient()) {
auto client = CastToClient();
auto inst = client->GetInv().GetItem(EQ::invslot::slotSecondary);
if (inst) {
if (inst->GetItemRecommendedLevel(true) <= GetLevel())
shield_ac = inst->GetItemArmorClass(true);
else
shield_ac = client->CalcRecommendedLevelBonus(GetLevel(), inst->GetItemRecommendedLevel(true), inst->GetItemArmorClass(true));
}
shield_ac += client->GetHeroicSTR() / 10;
}
// EQ math
ac = (ac * 4) / 3;
// anti-twink
if (!skip_caps && IsClient() && GetLevel() < RuleI(Combat, LevelToStopACTwinkControl))
ac = std::min(ac, 25 + 6 * GetLevel());
ac = std::max(0, ac + GetClassRaceACBonus());
if (IsNPC()) {
// This is the developer tweaked number
// for the VAST majority of NPCs in EQ this number didn't exceed 600 until recently (PoWar)
// According to the guild hall Combat Dummies, a level 50 classic EQ mob it should be ~115
// For a 60 PoP mob ~120, 70 OoW ~120
ac += GetAC();
ac += GetPetACBonusFromOwner();
auto spell_aa_ac = aabonuses.AC + spellbonuses.AC;
ac += GetSkill(EQ::skills::SkillDefense) / 5;
if (EQ::ValueWithin(static_cast<int>(GetClass()), NECROMANCER, ENCHANTER))
ac += spell_aa_ac / 3;
else
ac += spell_aa_ac / 4;
}
else { // TODO: so we can't set NPC skills ... so the skill bonus ends up being HUGE so lets nerf them a bit
auto spell_aa_ac = aabonuses.AC + spellbonuses.AC;
if (EQ::ValueWithin(static_cast<int>(GetClass()), NECROMANCER, ENCHANTER))
ac += GetSkill(EQ::skills::SkillDefense) / 2 + spell_aa_ac / 3;
else
ac += GetSkill(EQ::skills::SkillDefense) / 3 + spell_aa_ac / 4;
}
if (GetAGI() > 70)
ac += GetAGI() / 20;
if (ac < 0)
ac = 0;
if (!skip_caps && (IsClient()
#ifdef BOTS
|| IsBot()
#endif
)) {
auto softcap = GetACSoftcap();
auto returns = GetSoftcapReturns();
int total_aclimitmod = aabonuses.CombatStability + itembonuses.CombatStability + spellbonuses.CombatStability;
if (total_aclimitmod)
softcap = (softcap * (100 + total_aclimitmod)) / 100;
softcap += shield_ac;
if (ac > softcap) {
auto over_cap = ac - softcap;
ac = softcap + (over_cap * returns);
}
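// Illustrative example (made-up numbers): with a 400 softcap (after CombatStability and shield AC) and 35% returns,
// a raw ac of 500 ends up as 400 + 100 * 0.35 = 435.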
LogCombatDetail("ACSum ac [{}] softcap [{}] returns [{}]", ac, softcap, returns);
}
else {
LogCombatDetail("ACSum ac [{}]", ac);
}
return ac;
}
int Mob::GetBestMeleeSkill()
{
int bestSkill = 0;
EQ::skills::SkillType meleeSkills[] = {
EQ::skills::Skill1HBlunt,
EQ::skills::Skill1HSlashing,
EQ::skills::Skill2HBlunt,
EQ::skills::Skill2HSlashing,
EQ::skills::SkillHandtoHand,
EQ::skills::Skill1HPiercing,
EQ::skills::Skill2HPiercing,
EQ::skills::SkillCount
};
for (int i = 0; meleeSkills[i] != EQ::skills::SkillCount; ++i) {
int value = GetSkill(meleeSkills[i]);
bestSkill = std::max(value, bestSkill);
}
return bestSkill;
}
//SYNC WITH: tune.cpp, mob.h Tuneoffense
int Mob::offense(EQ::skills::SkillType skill)
{
int offense = GetSkill(skill);
int stat_bonus = GetSTR();
switch (skill) {
case EQ::skills::SkillArchery:
case EQ::skills::SkillThrowing:
stat_bonus = GetDEX();
break;
// Mobs with no weapons default to H2H.
// Since H2H is capped at 100 for many many classes,
// lets not handicap mobs based on not spawning with a
// weapon.
//
// Maybe we tweak this if Disarm is actually implemented.
case EQ::skills::SkillHandtoHand:
offense = GetBestMeleeSkill();
break;
}
if (stat_bonus >= 75)
offense += (2 * stat_bonus - 150) / 3;
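// Illustrative example (made-up numbers): STR 255 adds (2 * 255 - 150) / 3 = 120 to offense before ATK is added.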
offense += GetATK() + GetPetATKBonusFromOwner();
return offense;
}
// this assumes "this" is the defender
// this returns between 0.1 to 2.0
double Mob::RollD20(int offense, int mitigation)
{
static double mods[] = {
0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7, 1.8, 1.9, 2.0
};
if (IsClient() && CastToClient()->IsSitting())
return mods[19];
auto atk_roll = zone->random.Roll0(offense + 5);
auto def_roll = zone->random.Roll0(mitigation + 5);
int avg = (offense + mitigation + 10) / 2;
int index = std::max(0, (atk_roll - def_roll) + (avg / 2));
index = EQ::Clamp((index * 20) / avg, 0, 19);
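// Illustrative example (made-up numbers): offense 300 vs mitigation 300 gives avg 305; equal rolls land on
// index (152 * 20) / 305 = 9, i.e. the 1.0 multiplier.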
return mods[index];
}
//SYNC WITH: tune.cpp, mob.h TuneMeleeMitigation
void Mob::MeleeMitigation(Mob *attacker, DamageHitInfo &hit, ExtraAttackOptions *opts)
{
#ifdef LUA_EQEMU
bool ignoreDefault = false;
LuaParser::Instance()->MeleeMitigation(this, attacker, hit, opts, ignoreDefault);
if (ignoreDefault) {
return;
}
#endif
if (hit.damage_done < 0 || hit.base_damage == 0)
return;
Mob* defender = this;
auto mitigation = defender->GetMitigationAC();
if (IsClient() && attacker->IsClient())
mitigation = mitigation * 80 / 100; // 2004 PvP changes
if (opts) {
mitigation *= (1.0f - opts->armor_pen_percent);
mitigation -= opts->armor_pen_flat;
}
auto roll = RollD20(hit.offense, mitigation);
// +0.5 for rounding, min to 1 dmg
hit.damage_done = std::max(static_cast<int>(roll * static_cast<double>(hit.base_damage) + 0.5), 1);
Log(Logs::Detail, Logs::Attack, "mitigation %d vs offense %d. base %d rolled %f damage %d", mitigation, hit.offense, hit.base_damage, roll, hit.damage_done);
}
//Returns the weapon damage against the input mob
//if we cannot hit the mob with the current weapon we will get a value less than or equal to zero
//Else we know we can hit.
//GetWeaponDamage(mob*, const EQ::ItemData*) is intended to be used for mobs or any other situation where we do not have a client inventory item
//GetWeaponDamage(mob*, const EQ::ItemInstance*) is intended to be used for situations where we have a client inventory item
int Mob::GetWeaponDamage(Mob *against, const EQ::ItemData *weapon_item) {
int dmg = 0;
int banedmg = 0;
//can't hit invulnerable stuff with weapons.
if (against->GetInvul() || against->GetSpecialAbility(IMMUNE_MELEE)) {
return 0;
}
//check to see if our weapons or fists are magical.
if (against->GetSpecialAbility(IMMUNE_MELEE_NONMAGICAL)) {
if (GetSpecialAbility(SPECATK_MAGICAL)) {
dmg = 1;
}
//On live this occurs for ALL NPCs of level >= 10
else if (IsNPC() && GetLevel() >= RuleI(Combat, NPCAttackMagicLevel)) {
dmg = 1;
}
else if (weapon_item) {
if (weapon_item->Magic) {
if (weapon_item->Damage && (weapon_item->IsType1HWeapon() || weapon_item->IsType2HWeapon())) {
dmg = weapon_item->Damage;
}
//Non weapon items, ie. boots for kick.
else if (weapon_item->ItemType == EQ::item::ItemTypeArmor) {
dmg = 1;
}
else {
return 0;
}
}
else {
return 0;
}
}
else if ((GetClass() == MONK || GetClass() == BEASTLORD) && GetLevel() >= 30) {
dmg = GetHandToHandDamage();
}
else {
return 0;
}
}
else {
if (weapon_item) {
dmg = weapon_item->Damage;
dmg = dmg <= 0 ? 1 : dmg;
}
else {
dmg = GetHandToHandDamage();
}
}
int eledmg = 0;
if (!against->GetSpecialAbility(IMMUNE_MAGIC)) {
if (weapon_item && weapon_item->ElemDmgAmt) {
//we don't check resist for npcs here
eledmg = weapon_item->ElemDmgAmt;
dmg += eledmg;
}
}
if (against->GetSpecialAbility(IMMUNE_MELEE_EXCEPT_BANE)) {
if (weapon_item) {
if (weapon_item->BaneDmgBody == against->GetBodyType()) {
banedmg += weapon_item->BaneDmgAmt;
}
if (weapon_item->BaneDmgRace == against->GetRace()) {
banedmg += weapon_item->BaneDmgRaceAmt;
}
}
if (!banedmg) {
if (!GetSpecialAbility(SPECATK_BANE))
return 0;
else
return 1;
}
else
dmg += banedmg;
}
else {
if (weapon_item) {
if (weapon_item->BaneDmgBody == against->GetBodyType()) {
banedmg += weapon_item->BaneDmgAmt;
}
if (weapon_item->BaneDmgRace == against->GetRace()) {
banedmg += weapon_item->BaneDmgRaceAmt;
}
}
dmg += (banedmg + eledmg);
}
if (dmg <= 0) {
return 0;
}
else
return dmg;
}
int Mob::GetWeaponDamage(Mob *against, const EQ::ItemInstance *weapon_item, uint32 *hate)
{
int dmg = 0;
int banedmg = 0;
if (!against || against->GetInvul() || against->GetSpecialAbility(IMMUNE_MELEE))
return 0;
// check for items being illegally attained
if (weapon_item) {
if (!weapon_item->GetItem())
return 0;
if (weapon_item->GetItemRequiredLevel(true) > GetLevel())
return 0;
if (!weapon_item->IsEquipable(GetBaseRace(), GetClass()))
return 0;
}
if (against->GetSpecialAbility(IMMUNE_MELEE_NONMAGICAL)) {
if (weapon_item) {
// check to see if the weapon is magic
bool MagicWeapon = weapon_item->GetItemMagical(true) || spellbonuses.MagicWeapon || itembonuses.MagicWeapon;
if (MagicWeapon) {
auto rec_level = weapon_item->GetItemRecommendedLevel(true);
if (IsClient() && GetLevel() < rec_level)
dmg = CastToClient()->CalcRecommendedLevelBonus(
GetLevel(), rec_level, weapon_item->GetItemWeaponDamage(true));
else
dmg = weapon_item->GetItemWeaponDamage(true);
dmg = dmg <= 0 ? 1 : dmg;
}
else {
return 0;
}
}
else {
bool MagicGloves = false;
if (IsClient()) {
const EQ::ItemInstance *gloves = CastToClient()->GetInv().GetItem(EQ::invslot::slotHands);
if (gloves)
MagicGloves = gloves->GetItemMagical(true);
}
if (GetClass() == MONK || GetClass() == BEASTLORD) {
if (MagicGloves || GetLevel() >= 30) {
dmg = GetHandToHandDamage();
if (hate)
*hate += dmg;
}
}
else if (GetOwner() &&
GetLevel() >=
RuleI(Combat, PetAttackMagicLevel)) { // pets wouldn't actually use this but...
dmg = 1; // it gives us an idea if we can hit
}
else if (MagicGloves || GetSpecialAbility(SPECATK_MAGICAL)) {
dmg = 1;
}
else
return 0;
}
}
else {
if (weapon_item) {
if (weapon_item->GetItem()) {
auto rec_level = weapon_item->GetItemRecommendedLevel(true);
if (IsClient() && GetLevel() < rec_level) {
dmg = CastToClient()->CalcRecommendedLevelBonus(
GetLevel(), rec_level, weapon_item->GetItemWeaponDamage(true));
}
else {
dmg = weapon_item->GetItemWeaponDamage(true);
}
dmg = dmg <= 0 ? 1 : dmg;
}
}
else {
dmg = GetHandToHandDamage();
if (hate)
*hate += dmg;
}
}
int eledmg = 0;
if (!against->GetSpecialAbility(IMMUNE_MAGIC)) {
if (weapon_item && weapon_item->GetItem() && weapon_item->GetItemElementalFlag(true))
// the client actually has the way this is done, it does not appear to check req!
eledmg = against->ResistElementalWeaponDmg(weapon_item);
}
if (weapon_item && weapon_item->GetItem() &&
(weapon_item->GetItemBaneDamageBody(true) || weapon_item->GetItemBaneDamageRace(true)))
banedmg = against->CheckBaneDamage(weapon_item);
if (against->GetSpecialAbility(IMMUNE_MELEE_EXCEPT_BANE)) {
if (!banedmg) {
if (!GetSpecialAbility(SPECATK_BANE))
return 0;
else
return 1;
}
else {
dmg += (banedmg + eledmg);
if (hate)
*hate += banedmg;
}
}
else {
dmg += (banedmg + eledmg);
if (hate)
*hate += banedmg;
}
return std::max(0, dmg);
}
int Client::DoDamageCaps(int base_damage)
{
// this is based on a client function that caps melee base_damage
auto level = GetLevel();
auto stop_level = RuleI(Combat, LevelToStopDamageCaps);
if (stop_level && stop_level <= level)
return base_damage;
int cap = 0;
if (level >= 125) {
cap = 7 * level;
}
else if (level >= 110) {
cap = 6 * level;
}
else if (level >= 90) {
cap = 5 * level;
}
else if (level >= 70) {
cap = 4 * level;
}
else if (level >= 40) {
switch (GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
cap = 80;
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
cap = 40;
break;
default:
cap = 200;
break;
}
}
else if (level >= 30) {
switch (GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
cap = 26;
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
cap = 18;
break;
default:
cap = 60;
break;
}
}
else if (level >= 20) {
switch (GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
cap = 20;
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
cap = 12;
break;
default:
cap = 30;
break;
}
}
else if (level >= 10) {
switch (GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
cap = 12;
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
cap = 10;
break;
default:
cap = 14;
break;
}
}
else {
switch (GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
cap = 9;
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
cap = 6;
break;
default:
cap = 10; // this is where the 20 damage cap comes from
break;
}
}
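// Illustrative example (made-up numbers): a level 35 character in the default bracket is capped at 60 base damage,
// while at level 90+ the cap is simply a multiple of level (5 * level in that bracket).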
return std::min(cap, base_damage);
}
// other is the defender, this is the attacker
//SYNC WITH: tune.cpp, mob.h TuneDoAttack
void Mob::DoAttack(Mob *other, DamageHitInfo &hit, ExtraAttackOptions *opts)
{
if (!other)
return;
LogCombat("[{}]::DoAttack vs [{}] base [{}] min [{}] offense [{}] tohit [{}] skill [{}]", GetName(),
other->GetName(), hit.base_damage, hit.min_damage, hit.offense, hit.tohit, hit.skill);
// check to see if we hit..
if (other->AvoidDamage(this, hit)) {
int strike_through = itembonuses.StrikeThrough + spellbonuses.StrikeThrough + aabonuses.StrikeThrough;
if (strike_through && zone->random.Roll(strike_through)) {
MessageString(Chat::StrikeThrough,
STRIKETHROUGH_STRING); // You strike through your opponents defenses!
hit.damage_done = 1; // set to one, we will check this to continue
}
// I'm pretty sure you can riposte a riposte
if (hit.damage_done == DMG_RIPOSTED) {
DoRiposte(other);
//if (IsDead())
return;
}
LogCombat("Avoided/strikethrough damage with code [{}]", hit.damage_done);
}
if (hit.damage_done >= 0) {
if (other->CheckHitChance(this, hit)) {
if (IsNPC() && other->IsClient() && other->animation > 0 && GetLevel() >= 5 && BehindMob(other, GetX(), GetY())) {
// ~ 12% chance
if (zone->random.Roll(12)) {
int stun_resist2 = other->spellbonuses.FrontalStunResist + other->itembonuses.FrontalStunResist + other->aabonuses.FrontalStunResist;
int stun_resist = other->spellbonuses.StunResist + other->itembonuses.StunResist + other->aabonuses.StunResist;
if (zone->random.Roll(stun_resist2)) {
other->MessageString(Chat::Stun, AVOID_STUNNING_BLOW);
} else if (zone->random.Roll(stun_resist)) {
other->MessageString(Chat::Stun, SHAKE_OFF_STUN);
} else {
other->Stun(3000); // yuck -- 3 seconds
}
}
}
other->MeleeMitigation(this, hit, opts);
if (hit.damage_done > 0) {
ApplyDamageTable(hit);
CommonOutgoingHitSuccess(other, hit, opts);
}
LogCombat("Final damage after all reductions: [{}]", hit.damage_done);
}
else {
LogCombat("Attack missed. Damage set to 0");
hit.damage_done = 0;
}
}
}
//note: throughout this method, setting `damage` to a negative is a way to
//stop the attack calculations
// IsFromSpell added to allow spell effects to use Attack. (Mainly for the Rampage AA right now.)
//SYNC WITH: tune.cpp, mob.h TuneClientAttack
bool Client::Attack(Mob* other, int Hand, bool bRiposte, bool IsStrikethrough, bool IsFromSpell, ExtraAttackOptions *opts)
{
if (!other) {
SetTarget(nullptr);
LogError("A null Mob object was passed to Client::Attack() for evaluation!");
return false;
}
if (!GetTarget())
SetTarget(other);
LogCombat("Attacking [{}] with hand [{}] [{}]", other ? other->GetName() : "(nullptr)", Hand, bRiposte ? "(this is a riposte)" : "");
//SetAttackTimer();
if (
(IsCasting() && GetClass() != BARD && !IsFromSpell)
|| other == nullptr
|| ((IsClient() && CastToClient()->dead) || (other->IsClient() && other->CastToClient()->dead))
|| (GetHP() < 0)
|| (!IsAttackAllowed(other))
) {
LogCombat("Attack cancelled, invalid circumstances");
return false; // Only bards can attack while casting
}
if (DivineAura() && !GetGM()) {//can't attack while invulnerable unless you're a GM
LogCombat("Attack cancelled, Divine Aura is in effect");
MessageString(Chat::DefaultText, DIVINE_AURA_NO_ATK); //You can't attack while invulnerable
return false;
}
if (GetFeigned())
return false; // Rogean: How can you attack while feigned? Moved up from Aggro Code.
EQ::ItemInstance* weapon = nullptr;
if (Hand == EQ::invslot::slotSecondary) { // Kaiyodo - Pick weapon from the attacking hand
weapon = GetInv().GetItem(EQ::invslot::slotSecondary);
OffHandAtk(true);
}
else {
weapon = GetInv().GetItem(EQ::invslot::slotPrimary);
OffHandAtk(false);
}
if (weapon != nullptr) {
if (!weapon->IsWeapon()) {
LogCombat("Attack cancelled, Item [{}] ([{}]) is not a weapon", weapon->GetItem()->Name, weapon->GetID());
return(false);
}
LogCombat("Attacking with weapon: [{}] ([{}])", weapon->GetItem()->Name, weapon->GetID());
}
else {
LogCombat("Attacking without a weapon");
}
DamageHitInfo my_hit;
// calculate attack_skill and skillinuse depending on hand and weapon
// also send Packet to near clients
my_hit.skill = AttackAnimation(Hand, weapon);
LogCombat("Attacking with [{}] in slot [{}] using skill [{}]", weapon ? weapon->GetItem()->Name : "Fist", Hand, my_hit.skill);
// Now figure out damage
my_hit.damage_done = 1;
my_hit.min_damage = 0;
uint8 mylevel = GetLevel() ? GetLevel() : 1;
uint32 hate = 0;
if (weapon)
hate = (weapon->GetItem()->Damage + weapon->GetItem()->ElemDmgAmt);
my_hit.base_damage = GetWeaponDamage(other, weapon, &hate);
if (hate == 0 && my_hit.base_damage > 1)
hate = my_hit.base_damage;
//if weapon damage > 0 then we know we can hit the target with this weapon
//otherwise we cannot and we set the damage to -5 later on
if (my_hit.base_damage > 0) {
// if we revamp this function to be more general, we will have to make sure this isn't
// executed for anything BUT normal melee damage weapons from auto attack
if (Hand == EQ::invslot::slotPrimary || Hand == EQ::invslot::slotSecondary)
my_hit.base_damage = DoDamageCaps(my_hit.base_damage);
auto shield_inc = spellbonuses.ShieldEquipDmgMod + itembonuses.ShieldEquipDmgMod + aabonuses.ShieldEquipDmgMod;
if (shield_inc > 0 && HasShieldEquiped() && Hand == EQ::invslot::slotPrimary) {
my_hit.base_damage = my_hit.base_damage * (100 + shield_inc) / 100;
hate = hate * (100 + shield_inc) / 100;
}
CheckIncreaseSkill(my_hit.skill, other, -15);
CheckIncreaseSkill(EQ::skills::SkillOffense, other, -15);
// ***************************************************************
// *** Calculate the damage bonus, if applicable, for this hit ***
// ***************************************************************
#ifndef EQEMU_NO_WEAPON_DAMAGE_BONUS
// If you include the preprocessor directive "#define EQEMU_NO_WEAPON_DAMAGE_BONUS", that indicates that you do not
// want damage bonuses added to weapon damage at all. This feature was requested by ChaosSlayer on the EQEmu Forums.
//
// This is not recommended for normal usage, as the damage bonus represents a non-trivial component of the DPS output
// of weapons wielded by higher-level melee characters (especially for two-handed weapons).
int ucDamageBonus = 0;
if (Hand == EQ::invslot::slotPrimary && GetLevel() >= 28 && IsWarriorClass())
{
// Damage bonuses apply only to hits from the main hand (Hand == MainPrimary) by characters level 28 and above
// who belong to a melee class. If we're here, then all of these conditions apply.
ucDamageBonus = GetWeaponDamageBonus(weapon ? weapon->GetItem() : (const EQ::ItemData*) nullptr);
my_hit.min_damage = ucDamageBonus;
hate += ucDamageBonus;
}
#endif
//Live AA - Sinister Strikes *Adds weapon damage bonus to offhand weapon.
if (Hand == EQ::invslot::slotSecondary) {
if (aabonuses.SecondaryDmgInc || itembonuses.SecondaryDmgInc || spellbonuses.SecondaryDmgInc) {
ucDamageBonus = GetWeaponDamageBonus(weapon ? weapon->GetItem() : (const EQ::ItemData*) nullptr, true);
my_hit.min_damage = ucDamageBonus;
hate += ucDamageBonus;
}
}
// damage = mod_client_damage(damage, skillinuse, Hand, weapon, other);
LogCombat("Damage calculated: base [{}] min damage [{}] skill [{}]", my_hit.base_damage, my_hit.min_damage, my_hit.skill);
int hit_chance_bonus = 0;
my_hit.offense = offense(my_hit.skill); // we need this a few times
my_hit.hand = Hand;
if (opts) {
my_hit.base_damage *= opts->damage_percent;
my_hit.base_damage += opts->damage_flat;
hate *= opts->hate_percent;
hate += opts->hate_flat;
hit_chance_bonus += opts->hit_chance;
}
my_hit.tohit = GetTotalToHit(my_hit.skill, hit_chance_bonus);
DoAttack(other, my_hit, opts);
}
else {
my_hit.damage_done = DMG_INVULNERABLE;
}
// Hate generation is on a per-swing basis; regardless of a hit, miss, or block, it's always the same.
// If we are this far, it means we are at least making a swing.
other->AddToHateList(this, hate);
//Guard Assist Code
if (RuleB(Character, PVPEnableGuardFactionAssist)) {
if ((IsClient() && other->IsClient()) || (HasOwner() && GetOwner()->IsClient() && other->IsClient())) {
auto& mob_list = entity_list.GetCloseMobList(other);
for (auto& e : mob_list) {
auto mob = e.second;
if (mob->IsNPC() && mob->CastToNPC()->IsGuard()) {
float distance = Distance(other->CastToClient()->m_Position, mob->GetPosition());
if ((mob->CheckLosFN(other) || mob->CheckLosFN(this)) && distance <= 70) {
auto petorowner = GetOwnerOrSelf();
if (other->GetReverseFactionCon(mob) <= petorowner->GetReverseFactionCon(mob)) {
mob->AddToHateList(this);
}
}
}
}
}
}
///////////////////////////////////////////////////////////
////// Send Attack Damage
///////////////////////////////////////////////////////////
if (my_hit.damage_done > 0 && aabonuses.SkillAttackProc[SBIndex::SKILLPROC_CHANCE] && aabonuses.SkillAttackProc[SBIndex::SKILLPROC_SKILL] == my_hit.skill &&
IsValidSpell(aabonuses.SkillAttackProc[SBIndex::SKILLPROC_SPELL_ID])) {
float chance = aabonuses.SkillAttackProc[SBIndex::SKILLPROC_CHANCE] / 1000.0f;
if (zone->random.Roll(chance))
SpellFinished(aabonuses.SkillAttackProc[SBIndex::SKILLPROC_SPELL_ID], other, EQ::spells::CastingSlot::Item, 0, -1,
spells[aabonuses.SkillAttackProc[SBIndex::SKILLPROC_SPELL_ID]].resist_difficulty);
}
other->Damage(this, my_hit.damage_done, SPELL_UNKNOWN, my_hit.skill, true, -1, false, m_specialattacks);
if (IsDead()) return false;
MeleeLifeTap(my_hit.damage_done);
if (my_hit.damage_done > 0 && HasSkillProcSuccess() && other && other->GetHP() > 0)
TrySkillProc(other, my_hit.skill, 0, true, Hand);
CommonBreakInvisibleFromCombat();
if (GetTarget())
TriggerDefensiveProcs(other, Hand, true, my_hit.damage_done);
if (my_hit.damage_done > 0)
return true;
else
return false;
}
//used by complete heal and #heal
void Mob::Heal()
{
SetMaxHP();
SendHPUpdate();
}
void Client::Damage(Mob* other, int32 damage, uint16 spell_id, EQ::skills::SkillType attack_skill, bool avoidable, int8 buffslot, bool iBuffTic, eSpecialAttacks special)
{
if (dead || IsCorpse())
return;
if (spell_id == 0)
spell_id = SPELL_UNKNOWN;
// cut all PVP spell damage to 2/3
// Blasting ourselves is considered PvP
//Don't do PvP mitigation if the caster is damaging himself
//should this be applied to all damage? comments sound like some is for spell DMG
//patch notes on PVP reductions only mention archery/throwing ... not normal dmg
if (other && other->IsClient() && (other != this) && damage > 0) {
int PvPMitigation = 100;
if (attack_skill == EQ::skills::SkillArchery || attack_skill == EQ::skills::SkillThrowing)
PvPMitigation = 80;
else
PvPMitigation = 67;
damage = std::max((damage * PvPMitigation) / 100, 1);
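// Illustrative example (made-up numbers): 300 incoming melee damage from another player becomes (300 * 67) / 100 = 201;
// archery/throwing uses 80% instead.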
}
if (!ClientFinishedLoading())
damage = -5;
//do a majority of the work...
CommonDamage(other, damage, spell_id, attack_skill, avoidable, buffslot, iBuffTic, special);
if (damage > 0) {
if (spell_id == SPELL_UNKNOWN)
CheckIncreaseSkill(EQ::skills::SkillDefense, other, -15);
}
}
bool Client::Death(Mob* killerMob, int32 damage, uint16 spell, EQ::skills::SkillType attack_skill)
{
if (!ClientFinishedLoading())
return false;
if (dead)
return false; //can't die more than once...
if (!spell)
spell = SPELL_UNKNOWN;
std::string export_string = fmt::format(
"{} {} {} {}",
killerMob ? killerMob->GetID() : 0,
damage,
spell,
static_cast<int>(attack_skill)
);
if (parse->EventPlayer(EVENT_DEATH, this, export_string, 0) != 0) {
if (GetHP() < 0) {
SetHP(0);
}
return false;
}
if (killerMob && killerMob->IsClient() && (spell != SPELL_UNKNOWN) && damage > 0) {
char val1[20] = { 0 };
entity_list.MessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, DamageMessages),
Chat::NonMelee, /* 283 */
HIT_NON_MELEE, /* %1 hit %2 for %3 points of non-melee damage. */
killerMob->GetCleanName(), /* Message1 */
GetCleanName(), /* Message2 */
ConvertArray(damage, val1)/* Message3 */
);
}
int exploss = 0;
LogCombat("Fatal blow dealt by [{}] with [{}] damage, spell [{}], skill [{}]", killerMob ? killerMob->GetName() : "Unknown", damage, spell, attack_skill);
// #1: Send death packet to everyone
uint8 killed_level = GetLevel();
SendLogoutPackets();
/* Make self become corpse packet */
EQApplicationPacket app2(OP_BecomeCorpse, sizeof(BecomeCorpse_Struct));
BecomeCorpse_Struct* bc = (BecomeCorpse_Struct*)app2.pBuffer;
bc->spawn_id = GetID();
bc->x = GetX();
bc->y = GetY();
bc->z = GetZ();
QueuePacket(&app2);
/* Make Death Packet */
EQApplicationPacket app(OP_Death, sizeof(Death_Struct));
Death_Struct* d = (Death_Struct*)app.pBuffer;
d->spawn_id = GetID();
d->killer_id = killerMob ? killerMob->GetID() : 0;
d->corpseid = GetID();
d->bindzoneid = m_pp.binds[0].zone_id;
d->spell_id = spell == SPELL_UNKNOWN ? 0xffffffff : spell;
d->attack_skill = spell != SPELL_UNKNOWN ? 0xe7 : attack_skill;
d->damage = damage;
app.priority = 6;
entity_list.QueueClients(this, &app);
// #2: figure out things that affect the player dying and mark them dead
InterruptSpell();
Mob* m_pet = GetPet();
SetPet(0);
SetHorseId(0);
ShieldAbilityClearVariables();
dead = true;
if (m_pet && m_pet->IsCharmed()) {
m_pet->BuffFadeByEffect(SE_Charm);
}
if (GetMerc()) {
GetMerc()->Suspend();
}
if (killerMob != nullptr)
{
if (killerMob->IsNPC()) {
parse->EventNPC(EVENT_SLAY, killerMob->CastToNPC(), this, "", 0);
mod_client_death_npc(killerMob);
uint16 emoteid = killerMob->GetEmoteID();
if (emoteid != 0)
killerMob->CastToNPC()->DoNPCEmote(KILLEDPC, emoteid);
killerMob->TrySpellOnKill(killed_level, spell);
}
if (killerMob->IsClient() && (IsDueling() || killerMob->CastToClient()->IsDueling())) {
SetDueling(false);
SetDuelTarget(0);
if (killerMob->IsClient() && killerMob->CastToClient()->IsDueling() && killerMob->CastToClient()->GetDuelTarget() == GetID())
{
//if duel opponent killed us...
killerMob->CastToClient()->SetDueling(false);
killerMob->CastToClient()->SetDuelTarget(0);
entity_list.DuelMessage(killerMob, this, false);
mod_client_death_duel(killerMob);
}
else {
//otherwise, we just died, end the duel.
Mob* who = entity_list.GetMob(GetDuelTarget());
if (who && who->IsClient()) {
who->CastToClient()->SetDueling(false);
who->CastToClient()->SetDuelTarget(0);
}
}
}
}
entity_list.RemoveFromTargets(this, true);
hate_list.RemoveEntFromHateList(this);
RemoveAutoXTargets();
//remove ourself from all proximities
ClearAllProximities();
/*
#3: exp loss and corpse generation
*/
// figure out if they should lose exp
if (RuleB(Character, UseDeathExpLossMult)) {
float GetNum[] = { 0.005f,0.015f,0.025f,0.035f,0.045f,0.055f,0.065f,0.075f,0.085f,0.095f,0.110f };
int Num = RuleI(Character, DeathExpLossMultiplier);
if ((Num < 0) || (Num > 10))
Num = 3;
float loss = GetNum[Num];
exploss = (int)((float)GetEXP() * (loss)); //lose % of total XP per rule (choose 0-10)
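// Illustrative example: DeathExpLossMultiplier of 3 selects 0.035, i.e. 3.5% of current exp is lost.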
}
if (!RuleB(Character, UseDeathExpLossMult)) {
exploss = (int)(GetLevel() * (GetLevel() / 18.0) * 12000);
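// Illustrative example (made-up numbers): at level 60 this is 60 * (60 / 18.0) * 12000 = 2,400,000 exp.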
}
if (RuleB(Zone, LevelBasedEXPMods)) {
// Death in levels with xp_mod (such as hell levels) was resulting
// in losing more than appropriate since the loss was the same but
// getting it back would take way longer. This makes the death the
// same amount of time to recover. Will also lose more if level is
// granting a bonus.
exploss *= zone->level_exp_mod[GetLevel()].ExpMod;
}
if ((GetLevel() < RuleI(Character, DeathExpLossLevel)) || (GetLevel() > RuleI(Character, DeathExpLossMaxLevel)) || IsBecomeNPC())
{
exploss = 0;
}
else if (killerMob)
{
if (killerMob->IsClient())
{
exploss = 0;
}
else if (killerMob->GetOwner() && killerMob->GetOwner()->IsClient())
{
exploss = 0;
}
}
if (spell != SPELL_UNKNOWN)
{
uint32 buff_count = GetMaxTotalSlots();
for (uint16 buffIt = 0; buffIt < buff_count; buffIt++)
{
if (buffs[buffIt].spellid == spell && buffs[buffIt].client)
{
exploss = 0; // no exp loss for pvp dot
break;
}
}
}
bool LeftCorpse = false;
// now we apply the exp loss, unmem their spells, and make a corpse
// unless they're a GM (or less than lvl 10)
if (!GetGM())
{
if (exploss > 0) {
int32 newexp = GetEXP();
if (exploss > newexp) {
//lost more than we have... wtf..
newexp = 1;
}
else {
newexp -= exploss;
}
SetEXP(newexp, GetAAXP());
//m_epp.perAA = 0; //reset to no AA exp on death.
}
//this generates a lot of 'updates' to the client that the client does not need
BuffFadeNonPersistDeath();
if (RuleB(Character, UnmemSpellsOnDeath)) {
if ((ClientVersionBit() & EQ::versions::maskSoFAndLater) && RuleB(Character, RespawnFromHover))
UnmemSpellAll(true);
else
UnmemSpellAll(false);
}
if ((RuleB(Character, LeaveCorpses) && GetLevel() >= RuleI(Character, DeathItemLossLevel)) || RuleB(Character, LeaveNakedCorpses))
{
// creating the corpse takes the cash/items off the player too
auto new_corpse = new Corpse(this, exploss);
std::string tmp;
database.GetVariable("ServerType", tmp);
if (tmp[0] == '1' && tmp[1] == '\0' && killerMob != nullptr && killerMob->IsClient()) {
database.GetVariable("PvPreward", tmp);
int reward = atoi(tmp.c_str());
if (reward == 3) {
database.GetVariable("PvPitem", tmp);
int pvpitem = atoi(tmp.c_str());
if (pvpitem>0 && pvpitem<200000)
new_corpse->SetPlayerKillItemID(pvpitem);
}
else if (reward == 2)
new_corpse->SetPlayerKillItemID(-1);
else if (reward == 1)
new_corpse->SetPlayerKillItemID(1);
else
new_corpse->SetPlayerKillItemID(0);
if (killerMob->CastToClient()->isgrouped) {
Group* group = entity_list.GetGroupByClient(killerMob->CastToClient());
if (group != 0)
{
for (int i = 0; i<6; i++)
{
if (group->members[i] != nullptr)
{
new_corpse->AllowPlayerLoot(group->members[i], i);
}
}
}
}
}
entity_list.AddCorpse(new_corpse, GetID());
SetID(0);
//send the become corpse packet to everybody else in the zone.
entity_list.QueueClients(this, &app2, true);
LeftCorpse = true;
}
}
else {
BuffFadeDetrimental();
}
/*
Reset AA reuse timers that need to be reset; live-like this is only Lay on Hands
*/
ResetOnDeathAlternateAdvancement();
/*
Reset reuse timer for classic skill based Lay on Hands (for the Titanium client I guess)
*/
if (GetClass() == PALADIN) // we could check if it's not expired I guess, but should be fine not to
p_timers.Clear(&database, pTimerLayHands);
/*
Finally, send em home
We change the mob variables, not pp directly, because Save() will copy
from these and overwrite what we set in pp anyway
*/
if (LeftCorpse && (ClientVersionBit() & EQ::versions::maskSoFAndLater) && RuleB(Character, RespawnFromHover))
{
ClearDraggedCorpses();
RespawnFromHoverTimer.Start(RuleI(Character, RespawnFromHoverTimer) * 1000);
SendRespawnBinds();
}
else
{
if (isgrouped)
{
Group *g = GetGroup();
if (g)
g->MemberZoned(this);
}
Raid* r = entity_list.GetRaidByClient(this);
if (r)
r->MemberZoned(this);
dead_timer.Start(5000, true);
m_pp.zone_id = m_pp.binds[0].zone_id;
m_pp.zoneInstance = m_pp.binds[0].instance_id;
database.MoveCharacterToZone(this->CharacterID(), m_pp.zone_id);
Save();
GoToDeath();
}
/* QS: PlayerLogDeaths */
if (RuleB(QueryServ, PlayerLogDeaths)) {
const char * killer_name = "";
if (killerMob && killerMob->GetCleanName()) { killer_name = killerMob->GetCleanName(); }
std::string event_desc = StringFormat("Died in zoneid:%i instid:%i by '%s', spellid:%i, damage:%i", this->GetZoneID(), this->GetInstanceID(), killer_name, spell, damage);
QServ->PlayerLogEvent(Player_Log_Deaths, this->CharacterID(), event_desc);
}
parse->EventPlayer(EVENT_DEATH_COMPLETE, this, export_string, 0);
return true;
}
//SYNC WITH: tune.cpp, mob.h TuneNPCAttack
bool NPC::Attack(Mob* other, int Hand, bool bRiposte, bool IsStrikethrough, bool IsFromSpell, ExtraAttackOptions *opts)
{
if (!other) {
SetTarget(nullptr);
LogError("A null Mob object was passed to NPC::Attack() for evaluation!");
return false;
}
if (DivineAura())
return(false);
if (!GetTarget())
SetTarget(other);
//Check that we can attack before we calc heading and face our target
if (!IsAttackAllowed(other)) {
if (this->GetOwnerID())
this->SayString(NOT_LEGAL_TARGET);
if (other) {
if (other->IsClient())
other->CastToClient()->RemoveXTarget(this, false);
RemoveFromHateList(other);
LogCombat("I am not allowed to attack [{}]", other->GetName());
}
return false;
}
FaceTarget(GetTarget());
DamageHitInfo my_hit;
my_hit.skill = EQ::skills::SkillHandtoHand;
my_hit.hand = Hand;
my_hit.damage_done = 1;
if (Hand == EQ::invslot::slotPrimary) {
my_hit.skill = static_cast<EQ::skills::SkillType>(GetPrimSkill());
OffHandAtk(false);
}
if (Hand == EQ::invslot::slotSecondary) {
my_hit.skill = static_cast<EQ::skills::SkillType>(GetSecSkill());
OffHandAtk(true);
}
//figure out what weapon they are using, if any
const EQ::ItemData* weapon = nullptr;
if (Hand == EQ::invslot::slotPrimary && equipment[EQ::invslot::slotPrimary] > 0)
weapon = database.GetItem(equipment[EQ::invslot::slotPrimary]);
else if (equipment[EQ::invslot::slotSecondary])
weapon = database.GetItem(equipment[EQ::invslot::slotSecondary]);
//We don't factor much from the weapon into the attack.
//Just the skill type so it doesn't look silly using punching animations and stuff while wielding weapons
if (weapon) {
LogCombat("Attacking with weapon: [{}] ([{}]) (too bad im not using it for much)", weapon->Name, weapon->ID);
if (Hand == EQ::invslot::slotSecondary && !weapon->IsType1HWeapon()) {
LogCombat("Attack with non-weapon cancelled");
return false;
}
switch (weapon->ItemType) {
case EQ::item::ItemType1HSlash:
my_hit.skill = EQ::skills::Skill1HSlashing;
break;
case EQ::item::ItemType2HSlash:
my_hit.skill = EQ::skills::Skill2HSlashing;
break;
case EQ::item::ItemType1HPiercing:
my_hit.skill = EQ::skills::Skill1HPiercing;
break;
case EQ::item::ItemType2HPiercing:
my_hit.skill = EQ::skills::Skill2HPiercing;
break;
case EQ::item::ItemType1HBlunt:
my_hit.skill = EQ::skills::Skill1HBlunt;
break;
case EQ::item::ItemType2HBlunt:
my_hit.skill = EQ::skills::Skill2HBlunt;
break;
case EQ::item::ItemTypeBow:
my_hit.skill = EQ::skills::SkillArchery;
break;
case EQ::item::ItemTypeLargeThrowing:
case EQ::item::ItemTypeSmallThrowing:
my_hit.skill = EQ::skills::SkillThrowing;
break;
default:
my_hit.skill = EQ::skills::SkillHandtoHand;
break;
}
}
//Guard Assist Code
if (RuleB(Character, PVPEnableGuardFactionAssist)) {
if ((IsClient() && other->IsClient()) || (HasOwner() && GetOwner()->IsClient() && other->IsClient())) {
auto& mob_list = entity_list.GetCloseMobList(other);
for (auto& e : mob_list) {
auto mob = e.second;
if (mob->IsNPC() && mob->CastToNPC()->IsGuard()) {
float distance = Distance(other->GetPosition(), mob->GetPosition());
if ((mob->CheckLosFN(other) || mob->CheckLosFN(this)) && distance <= 70) {
if (other->GetReverseFactionCon(mob) <= GetOwner()->GetReverseFactionCon(mob)) {
mob->AddToHateList(this);
}
}
}
}
}
}
int weapon_damage = GetWeaponDamage(other, weapon);
//do attack animation regardless of whether or not we can hit below
int16 charges = 0;
EQ::ItemInstance weapon_inst(weapon, charges);
my_hit.skill = AttackAnimation(Hand, &weapon_inst, my_hit.skill);
//basically "if not immune" then do the attack
if (weapon_damage > 0) {
//ele and bane dmg too
//NPCs add this differently than PCs
//if NPCs can't inherently hit the target we don't add bane/magic dmg which isn't exactly the same as PCs
int eleBane = 0;
if (weapon) {
if (RuleB(NPC, UseBaneDamage)) {
if (weapon->BaneDmgBody == other->GetBodyType()) {
eleBane += weapon->BaneDmgAmt;
}
if (weapon->BaneDmgRace == other->GetRace()) {
eleBane += weapon->BaneDmgRaceAmt;
}
}
// I don't think NPCs use this either ....
if (weapon->ElemDmgAmt) {
eleBane += (weapon->ElemDmgAmt * other->ResistSpell(weapon->ElemDmgType, 0, this) / 100);
}
}
if (!RuleB(NPC, UseItemBonusesForNonPets)) {
if (!GetOwner()) {
eleBane = 0;
}
}
uint8 otherlevel = other->GetLevel();
uint8 mylevel = this->GetLevel();
otherlevel = otherlevel ? otherlevel : 1;
mylevel = mylevel ? mylevel : 1;
//damage = mod_npc_damage(damage, skillinuse, Hand, weapon, other);
my_hit.base_damage = GetBaseDamage() + eleBane;
my_hit.min_damage = GetMinDamage();
int32 hate = my_hit.base_damage + my_hit.min_damage;
int hit_chance_bonus = 0;
if (opts) {
my_hit.base_damage *= opts->damage_percent;
my_hit.base_damage += opts->damage_flat;
hate *= opts->hate_percent;
hate += opts->hate_flat;
hit_chance_bonus += opts->hit_chance;
}
my_hit.offense = offense(my_hit.skill);
my_hit.tohit = GetTotalToHit(my_hit.skill, hit_chance_bonus);
DoAttack(other, my_hit, opts);
other->AddToHateList(this, hate);
LogCombat("Final damage against [{}]: [{}]", other->GetName(), my_hit.damage_done);
if (other->IsClient() && IsPet() && GetOwner()->IsClient()) {
//pets do half damage to clients in pvp
my_hit.damage_done /= 2;
if (my_hit.damage_done < 1)
my_hit.damage_done = 1;
}
}
else {
my_hit.damage_done = DMG_INVULNERABLE;
}
if (GetHP() > 0 && !other->HasDied()) {
other->Damage(this, my_hit.damage_done, SPELL_UNKNOWN, my_hit.skill, true, -1, false, m_specialattacks); // Not avoidable, client already had their chance to avoid
}
else
return false;
if (HasDied()) //killed by damage shield etc.
return false;
MeleeLifeTap(my_hit.damage_done);
CommonBreakInvisibleFromCombat();
//I doubt this works...
if (!GetTarget())
return true; //We killed them
if (!bRiposte && !other->HasDied()) {
TryWeaponProc(nullptr, weapon, other, Hand); //no weapon
if (!other->HasDied())
TrySpellProc(nullptr, weapon, other, Hand);
if (my_hit.damage_done > 0 && HasSkillProcSuccess() && !other->HasDied())
TrySkillProc(other, my_hit.skill, 0, true, Hand);
}
if (GetHP() > 0 && !other->HasDied())
TriggerDefensiveProcs(other, Hand, true, my_hit.damage_done);
if (my_hit.damage_done > 0)
return true;
else
return false;
}
void NPC::Damage(Mob* other, int32 damage, uint16 spell_id, EQ::skills::SkillType attack_skill, bool avoidable, int8 buffslot, bool iBuffTic, eSpecialAttacks special) {
if (spell_id == 0)
spell_id = SPELL_UNKNOWN;
//handle EVENT_ATTACK. Resets after we have not been attacked for 12 seconds
if (attacked_timer.Check())
{
LogCombat("Triggering EVENT_ATTACK due to attack by [{}]", other ? other->GetName() : "nullptr");
parse->EventNPC(EVENT_ATTACK, this, other, "", 0);
}
attacked_timer.Start(CombatEventTimer_expire);
if (!IsEngaged())
zone->AddAggroMob();
if (GetClass() == LDON_TREASURE)
{
if (IsLDoNLocked() && GetLDoNLockedSkill() != LDoNTypeMechanical)
{
damage = -5;
}
else
{
if (IsLDoNTrapped())
{
MessageString(Chat::Red, LDON_ACCIDENT_SETOFF2);
SpellFinished(GetLDoNTrapSpellID(), other, EQ::spells::CastingSlot::Item, 0, -1, spells[GetLDoNTrapSpellID()].resist_difficulty, false);
SetLDoNTrapSpellID(0);
SetLDoNTrapped(false);
SetLDoNTrapDetected(false);
}
}
}
//do a majority of the work...
CommonDamage(other, damage, spell_id, attack_skill, avoidable, buffslot, iBuffTic, special);
if (damage > 0) {
//see if we are gunna start fleeing
if (!IsPet()) CheckFlee();
}
}
bool NPC::Death(Mob* killer_mob, int32 damage, uint16 spell, EQ::skills::SkillType attack_skill)
{
LogCombat("Fatal blow dealt by [{}] with [{}] damage, spell [{}], skill [{}]",
((killer_mob) ? (killer_mob->GetName()) : ("[nullptr]")), damage, spell, attack_skill);
Mob *oos = nullptr;
if (killer_mob) {
oos = killer_mob->GetOwnerOrSelf();
std::string buffer = fmt::format("{} {} {} {}", killer_mob->GetID(), damage, spell, static_cast<int>(attack_skill));
if (parse->EventNPC(EVENT_DEATH, this, oos, buffer.c_str(), 0) != 0) {
if (GetHP() < 0) {
SetHP(0);
}
return false;
}
if (killer_mob->IsClient() && (spell != SPELL_UNKNOWN) && damage > 0) {
char val1[20] = { 0 };
entity_list.MessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, DamageMessages),
Chat::NonMelee, /* 283 */
HIT_NON_MELEE, /* %1 hit %2 for %3 points of non-melee damage. */
killer_mob->GetCleanName(), /* Message1 */
GetCleanName(), /* Message2 */
ConvertArray(damage, val1) /* Message3 */
);
}
}
else {
std::string buffer = fmt::format("{} {} {} {}", 0, damage, spell, static_cast<int>(attack_skill));
if (parse->EventNPC(EVENT_DEATH, this, nullptr, buffer.c_str(), 0) != 0) {
if (GetHP() < 0) {
SetHP(0);
}
return false;
}
}
if (IsEngaged()) {
zone->DelAggroMob();
Log(Logs::Detail, Logs::Attack, "%s Mobs currently Aggro %i", __FUNCTION__, zone->MobsAggroCount());
}
ShieldAbilityClearVariables();
SetHP(0);
SetPet(0);
if (GetSwarmOwner()) {
Mob* owner = entity_list.GetMobID(GetSwarmOwner());
if (owner)
owner->SetTempPetCount(owner->GetTempPetCount() - 1);
}
Mob* killer = GetHateDamageTop(this);
entity_list.RemoveFromTargets(this, p_depop);
if (p_depop == true)
return false;
HasAISpellEffects = false;
BuffFadeAll();
uint8 killed_level = GetLevel();
if (GetClass() == LDON_TREASURE) { // open chest
auto outapp = new EQApplicationPacket(OP_Animation, sizeof(Animation_Struct));
Animation_Struct* anim = (Animation_Struct*)outapp->pBuffer;
anim->spawnid = GetID();
anim->action = 0x0F;
anim->speed = 10;
entity_list.QueueCloseClients(this, outapp);
safe_delete(outapp);
}
auto app = new EQApplicationPacket(OP_Death, sizeof(Death_Struct));
Death_Struct* d = (Death_Struct*)app->pBuffer;
d->spawn_id = GetID();
d->killer_id = killer_mob ? killer_mob->GetID() : 0;
d->bindzoneid = 0;
d->spell_id = 0xffffffff; // Sending spell was causing extra DoT land msg
d->attack_skill = SkillDamageTypes[attack_skill];
d->damage = damage;
app->priority = 6;
entity_list.QueueClients(killer_mob, app, false);
safe_delete(app);
if (respawn2) {
respawn2->DeathReset(1);
}
if (killer_mob && GetClass() != LDON_TREASURE)
hate_list.AddEntToHateList(killer_mob, damage);
Mob *give_exp = hate_list.GetDamageTopOnHateList(this);
if (give_exp == nullptr)
give_exp = killer;
if (give_exp && give_exp->HasOwner()) {
bool ownerInGroup = false;
if ((give_exp->HasGroup() && give_exp->GetGroup()->IsGroupMember(give_exp->GetUltimateOwner()))
|| (give_exp->IsPet() && (give_exp->GetOwner()->IsClient()
|| (give_exp->GetOwner()->HasGroup() && give_exp->GetOwner()->GetGroup()->IsGroupMember(give_exp->GetOwner()->GetUltimateOwner())))))
ownerInGroup = true;
give_exp = give_exp->GetUltimateOwner();
#ifdef BOTS
if (!RuleB(Bots, BotGroupXP) && !ownerInGroup) {
give_exp = nullptr;
}
#endif //BOTS
}
if (give_exp && give_exp->IsTempPet() && give_exp->IsPetOwnerClient()) {
if (give_exp->IsNPC() && give_exp->CastToNPC()->GetSwarmOwner()) {
Mob* temp_owner = entity_list.GetMobID(give_exp->CastToNPC()->GetSwarmOwner());
if (temp_owner)
give_exp = temp_owner;
}
}
int PlayerCount = 0; // QueryServ Player Counting
Client *give_exp_client = nullptr;
if (give_exp && give_exp->IsClient())
give_exp_client = give_exp->CastToClient();
//do faction hits even if we are a merchant, so long as a player killed us
if (give_exp_client && !RuleB(NPC, EnableMeritBasedFaction))
hate_list.DoFactionHits(GetNPCFactionID());
bool IsLdonTreasure = (this->GetClass() == LDON_TREASURE);
if (give_exp_client && !IsCorpse()) {
Group *kg = entity_list.GetGroupByClient(give_exp_client);
Raid *kr = entity_list.GetRaidByClient(give_exp_client);
int32 finalxp = give_exp_client->GetExperienceForKill(this);
finalxp = give_exp_client->mod_client_xp(finalxp, this);
// handle task credit on behalf of the killer
if (RuleB(TaskSystem, EnableTaskSystem)) {
LogTasksDetail(
"[NPC::Death] Triggering HandleUpdateTasksOnKill for [{}] npc [{}]",
give_exp_client->GetCleanName(),
GetNPCTypeID()
);
task_manager->HandleUpdateTasksOnKill(give_exp_client, GetNPCTypeID());
}
if (kr) {
if (!IsLdonTreasure && MerchantType == 0) {
kr->SplitExp((finalxp), this);
if (killer_mob && (kr->IsRaidMember(killer_mob->GetName()) || kr->IsRaidMember(killer_mob->GetUltimateOwner()->GetName())))
killer_mob->TrySpellOnKill(killed_level, spell);
}
/* Send the EVENT_KILLED_MERIT event for all raid members */
for (int i = 0; i < MAX_RAID_MEMBERS; i++) {
if (kr->members[i].member != nullptr && kr->members[i].member->IsClient()) { // If Group Member is Client
Client *c = kr->members[i].member;
parse->EventNPC(EVENT_KILLED_MERIT, this, c, "killed", 0);
if (RuleB(NPC, EnableMeritBasedFaction))
c->SetFactionLevel(c->CharacterID(), GetNPCFactionID(), c->GetBaseClass(), c->GetBaseRace(), c->GetDeity());
mod_npc_killed_merit(kr->members[i].member);
PlayerCount++;
}
}
// QueryServ Logging - Raid Kills
if (RuleB(QueryServ, PlayerLogNPCKills)) {
auto pack =
new ServerPacket(ServerOP_QSPlayerLogNPCKills,
sizeof(QSPlayerLogNPCKill_Struct) +
(sizeof(QSPlayerLogNPCKillsPlayers_Struct) * PlayerCount));
PlayerCount = 0;
QSPlayerLogNPCKill_Struct* QS = (QSPlayerLogNPCKill_Struct*)pack->pBuffer;
QS->s1.NPCID = this->GetNPCTypeID();
QS->s1.ZoneID = this->GetZoneID();
QS->s1.Type = 2; // Raid Fight
for (int i = 0; i < MAX_RAID_MEMBERS; i++) {
if (kr->members[i].member != nullptr && kr->members[i].member->IsClient()) { // If Group Member is Client
Client *c = kr->members[i].member;
QS->Chars[PlayerCount].char_id = c->CharacterID();
PlayerCount++;
}
}
worldserver.SendPacket(pack); // Send Packet to World
safe_delete(pack);
}
// End QueryServ Logging
}
else if (give_exp_client->IsGrouped() && kg != nullptr) {
if (!IsLdonTreasure && MerchantType == 0) {
kg->SplitExp((finalxp), this);
if (killer_mob && (kg->IsGroupMember(killer_mob->GetName()) || kg->IsGroupMember(killer_mob->GetUltimateOwner()->GetName())))
killer_mob->TrySpellOnKill(killed_level, spell);
}
/* Send the EVENT_KILLED_MERIT event and update kill tasks
* for all group members */
for (int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (kg->members[i] != nullptr && kg->members[i]->IsClient()) { // If Group Member is Client
Client *c = kg->members[i]->CastToClient();
parse->EventNPC(EVENT_KILLED_MERIT, this, c, "killed", 0);
if (RuleB(NPC, EnableMeritBasedFaction))
c->SetFactionLevel(c->CharacterID(), GetNPCFactionID(), c->GetBaseClass(), c->GetBaseRace(), c->GetDeity());
mod_npc_killed_merit(c);
PlayerCount++;
}
}
// QueryServ Logging - Group Kills
if (RuleB(QueryServ, PlayerLogNPCKills)) {
auto pack =
new ServerPacket(ServerOP_QSPlayerLogNPCKills,
sizeof(QSPlayerLogNPCKill_Struct) +
(sizeof(QSPlayerLogNPCKillsPlayers_Struct) * PlayerCount));
PlayerCount = 0;
QSPlayerLogNPCKill_Struct* QS = (QSPlayerLogNPCKill_Struct*)pack->pBuffer;
QS->s1.NPCID = this->GetNPCTypeID();
QS->s1.ZoneID = this->GetZoneID();
QS->s1.Type = 1; // Group Fight
for (int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (kg->members[i] != nullptr && kg->members[i]->IsClient()) { // If Group Member is Client
Client *c = kg->members[i]->CastToClient();
QS->Chars[PlayerCount].char_id = c->CharacterID();
PlayerCount++;
}
}
worldserver.SendPacket(pack); // Send Packet to World
safe_delete(pack);
}
// End QueryServ Logging
}
else {
if (!IsLdonTreasure && MerchantType == 0) {
int conlevel = give_exp->GetLevelCon(GetLevel());
if (conlevel != CON_GRAY) {
if (!GetOwner() || (GetOwner() && !GetOwner()->IsClient())) {
give_exp_client->AddEXP((finalxp), conlevel);
if (killer_mob && (killer_mob->GetID() == give_exp_client->GetID() || killer_mob->GetUltimateOwner()->GetID() == give_exp_client->GetID()))
killer_mob->TrySpellOnKill(killed_level, spell);
}
}
}
/* Send the EVENT_KILLED_MERIT event */
parse->EventNPC(EVENT_KILLED_MERIT, this, give_exp_client, "killed", 0);
if (RuleB(NPC, EnableMeritBasedFaction))
give_exp_client->SetFactionLevel(give_exp_client->CharacterID(), GetNPCFactionID(), give_exp_client->GetBaseClass(),
give_exp_client->GetBaseRace(), give_exp_client->GetDeity());
mod_npc_killed_merit(give_exp_client);
// QueryServ Logging - Solo
if (RuleB(QueryServ, PlayerLogNPCKills)) {
auto pack = new ServerPacket(ServerOP_QSPlayerLogNPCKills,
sizeof(QSPlayerLogNPCKill_Struct) +
(sizeof(QSPlayerLogNPCKillsPlayers_Struct) * 1));
QSPlayerLogNPCKill_Struct* QS = (QSPlayerLogNPCKill_Struct*)pack->pBuffer;
QS->s1.NPCID = this->GetNPCTypeID();
QS->s1.ZoneID = this->GetZoneID();
QS->s1.Type = 0; // Solo Fight
Client *c = give_exp_client;
QS->Chars[0].char_id = c->CharacterID();
PlayerCount++;
worldserver.SendPacket(pack); // Send Packet to World
safe_delete(pack);
}
// End QueryServ Logging
}
}
bool allow_merchant_corpse = RuleB(Merchant, AllowCorpse);
bool is_merchant = (class_ == MERCHANT || class_ == ADVENTUREMERCHANT || MerchantType != 0);
if (!HasOwner() && !IsMerc() && !GetSwarmInfo() && (!is_merchant || allow_merchant_corpse) &&
((killer && (killer->IsClient() || (killer->HasOwner() && killer->GetUltimateOwner()->IsClient()) ||
(killer->IsNPC() && killer->CastToNPC()->GetSwarmInfo() && killer->CastToNPC()->GetSwarmInfo()->GetOwner() && killer->CastToNPC()->GetSwarmInfo()->GetOwner()->IsClient())))
|| (killer_mob && IsLdonTreasure)))
{
if (killer != 0) {
if (killer->GetOwner() != 0 && killer->GetOwner()->IsClient())
killer = killer->GetOwner();
if (killer->IsClient() && !killer->CastToClient()->GetGM())
this->CheckTrivialMinMaxLevelDrop(killer);
}
entity_list.RemoveFromAutoXTargets(this);
uint16 emoteid = this->GetEmoteID();
auto corpse = new Corpse(this, &itemlist, GetNPCTypeID(), &NPCTypedata,
level > 54 ? RuleI(NPC, MajorNPCCorpseDecayTimeMS)
: RuleI(NPC, MinorNPCCorpseDecayTimeMS));
entity_list.LimitRemoveNPC(this);
entity_list.AddCorpse(corpse, GetID());
entity_list.UnMarkNPC(GetID());
entity_list.RemoveNPC(GetID());
// entity_list.RemoveMobFromCloseLists(this);
close_mobs.clear();
this->SetID(0);
if (killer != 0 && emoteid != 0)
corpse->CastToNPC()->DoNPCEmote(AFTERDEATH, emoteid);
if (killer != 0 && killer->IsClient()) {
corpse->AllowPlayerLoot(killer, 0);
if (killer->IsGrouped()) {
Group* group = entity_list.GetGroupByClient(killer->CastToClient());
if (group != 0) {
for (int i = 0; i < 6; i++) { // Doesn't work right, needs work
if (group->members[i] != nullptr) {
corpse->AllowPlayerLoot(group->members[i], i);
}
}
}
}
else if (killer->IsRaidGrouped()) {
Raid* r = entity_list.GetRaidByClient(killer->CastToClient());
if (r) {
int i = 0;
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
switch (r->GetLootType()) {
case 0:
case 1:
if (r->members[x].member && r->members[x].IsRaidLeader) {
corpse->AllowPlayerLoot(r->members[x].member, i);
i++;
}
break;
case 2:
if (r->members[x].member && r->members[x].IsRaidLeader) {
corpse->AllowPlayerLoot(r->members[x].member, i);
i++;
}
else if (r->members[x].member && r->members[x].IsGroupLeader) {
corpse->AllowPlayerLoot(r->members[x].member, i);
i++;
}
break;
case 3:
if (r->members[x].member && r->members[x].IsLooter) {
corpse->AllowPlayerLoot(r->members[x].member, i);
i++;
}
break;
case 4:
if (r->members[x].member) {
corpse->AllowPlayerLoot(r->members[x].member, i);
i++;
}
break;
}
}
}
}
}
else if (killer_mob && IsLdonTreasure) {
auto u_owner = killer_mob->GetUltimateOwner();
if (u_owner->IsClient())
corpse->AllowPlayerLoot(u_owner, 0);
}
if (zone && zone->adv_data) {
ServerZoneAdventureDataReply_Struct *sr = (ServerZoneAdventureDataReply_Struct*)zone->adv_data;
if (sr->type == Adventure_Kill) {
zone->DoAdventureCountIncrease();
}
else if (sr->type == Adventure_Assassinate) {
if (sr->data_id == GetNPCTypeID()) {
zone->DoAdventureCountIncrease();
}
else {
zone->DoAdventureAssassinationCountIncrease();
}
}
}
}
else {
entity_list.RemoveFromXTargets(this);
}
// Parse quests even if we're killed by an NPC
if (oos) {
mod_npc_killed(oos);
uint16 emoteid = this->GetEmoteID();
if (emoteid != 0)
this->DoNPCEmote(ONDEATH, emoteid);
if (oos->IsNPC()) {
parse->EventNPC(EVENT_NPC_SLAY, oos->CastToNPC(), this, "", 0);
uint16 emoteid = oos->GetEmoteID();
if (emoteid != 0)
oos->CastToNPC()->DoNPCEmote(KILLEDNPC, emoteid);
killer_mob->TrySpellOnKill(killed_level, spell);
}
}
WipeHateList();
p_depop = true;
if (killer_mob && killer_mob->GetTarget() == this) //we can kill things without having them targeted
killer_mob->SetTarget(nullptr); //via AE effects and such..
entity_list.UpdateFindableNPCState(this, true);
std::string buffer = fmt::format("{} {} {} {}", killer_mob ? killer_mob->GetID() : 0, damage, spell, static_cast<int>(attack_skill));
parse->EventNPC(EVENT_DEATH_COMPLETE, this, oos, buffer.c_str(), 0);
/* Zone controller process EVENT_DEATH_ZONE (Death events) */
if (RuleB(Zone, UseZoneController)) {
auto controller = entity_list.GetNPCByNPCTypeID(ZONE_CONTROLLER_NPC_ID);
if (controller && GetNPCTypeID() != ZONE_CONTROLLER_NPC_ID) {
std::string data_pass = fmt::format("{} {} {} {} {}", killer_mob ? killer_mob->GetID() : 0, damage, spell, static_cast<int>(attack_skill), GetNPCTypeID());
parse->EventNPC(EVENT_DEATH_ZONE, controller, nullptr, data_pass.c_str(), 0);
}
}
return true;
}
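// Mob::AddToHateList - adds 'other' (and, where appropriate, its owner, our pet, or our owner)
// to this mob's hate list. Applies hate modifiers (casting subtlety, tunnel vision, improved
// taunt), caps damage credit at our remaining HP so a huge killing blow can't steal XP credit,
// honors the IMMUNE_AGGRO special abilities and pet hold/focus states, and fires EVENT_AGGRO /
// AI_Event_Engaged the first time we become engaged.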
void Mob::AddToHateList(Mob* other, uint32 hate /*= 0*/, int32 damage /*= 0*/, bool iYellForHelp /*= true*/, bool bFrenzy /*= false*/, bool iBuffTic /*= false*/, uint16 spell_id, bool pet_command)
{
if (!other)
return;
if (other == this)
return;
if (other->IsTrap())
return;
if (damage < 0) {
hate = 1;
}
if (iYellForHelp)
SetPrimaryAggro(true);
else
SetAssistAggro(true);
bool wasengaged = IsEngaged();
Mob* owner = other->GetOwner();
Mob* mypet = this->GetPet();
Mob* myowner = this->GetOwner();
Mob* targetmob = this->GetTarget();
bool on_hatelist = CheckAggro(other);
if (other) {
AddRampage(other);
if (on_hatelist) { // odd reason, if you're not on the hate list, subtlety etc don't apply!
// Spell Casting Subtlety etc
int hatemod = 100 + other->spellbonuses.hatemod + other->itembonuses.hatemod + other->aabonuses.hatemod;
if (hatemod < 1)
hatemod = 1;
hate = ((hate * (hatemod)) / 100);
}
else {
hate += 100; // 100 bonus initial aggro
}
}
// Pet that is /pet hold on will not add to their hate list if they're not engaged
// Pet that is /pet hold on and /pet focus on will not add others to their hate list
// Pet that is /pet ghold on will never add to their hate list unless /pet attack or /pet qattack
// we skip these checks if it's forced through a pet command
if (!pet_command) {
if (IsPet()) {
if ((IsGHeld() || (IsHeld() && IsFocused())) && !on_hatelist) // we want them to be able to climb the hate list
return;
if ((IsHeld() || IsPetStop() || IsPetRegroup()) && !wasengaged) // not 100% sure on stop/regroup kind of hard to test, but regroup is like "classic hold"
return;
}
}
if (other->IsNPC() && (other->IsPet() || other->CastToNPC()->GetSwarmOwner() > 0)) {
TryTriggerOnCastRequirement();
}
if (IsClient() && !IsAIControlled())
return;
if (IsFamiliar() || GetSpecialAbility(IMMUNE_AGGRO))
return;
if (GetSpecialAbility(IMMUNE_AGGRO_NPC) && other->IsNPC())
return;
if (GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && other->IsClient())
return;
if (spell_id != SPELL_UNKNOWN && NoDetrimentalSpellAggro(spell_id))
return;
if (other == myowner)
return;
if (other->GetSpecialAbility(IMMUNE_AGGRO_ON))
return;
if (GetSpecialAbility(NPC_TUNNELVISION)) {
int tv_mod = GetSpecialAbilityParam(NPC_TUNNELVISION, 0);
Mob *top = GetTarget();
if (top && top != other) {
if (tv_mod) {
float tv = tv_mod / 100.0f;
hate *= tv;
}
else {
hate *= RuleR(Aggro, TunnelVisionAggroMod);
}
}
}
// first add self
// The damage on the hate list is used to award XP to the killer. This check is to prevent Killstealing.
// e.g. Mob has 5000 hit points, Player A melees it down to 500 hp, Player B executes a headshot (10000 damage).
// If we add 10000 damage, Player B would get the kill credit, so we only award damage credit to player B of the
// amount of HP the mob had left.
//
if (damage > GetHP())
damage = GetHP();
if (spellbonuses.ImprovedTaunt[SBIndex::IMPROVED_TAUNT_AGGRO_MOD] && (GetLevel() < spellbonuses.ImprovedTaunt[SBIndex::IMPROVED_TAUNT_MAX_LVL])
&& other && (buffs[spellbonuses.ImprovedTaunt[SBIndex::IMPROVED_TAUNT_BUFFSLOT]].casterid != other->GetID()))
hate = (hate*spellbonuses.ImprovedTaunt[SBIndex::IMPROVED_TAUNT_AGGRO_MOD]) / 100;
hate_list.AddEntToHateList(other, hate, damage, bFrenzy, !iBuffTic);
if (other->IsClient() && !on_hatelist && !IsOnFeignMemory(other->CastToClient()))
other->CastToClient()->AddAutoXTarget(this);
#ifdef BOTS
// if other is a bot, add the bots client to the hate list
while (other->IsBot()) {
auto other_ = other->CastToBot();
if (!other_ || !other_->GetBotOwner()) {
break;
}
auto owner_ = other_->GetBotOwner()->CastToClient();
if (!owner_ || owner_->IsDead() || !owner_->InZone()) { // added isdead and inzone checks to avoid issues in AddAutoXTarget(...) below
break;
}
if (owner_->GetFeigned()) {
AddFeignMemory(owner_);
}
else if (!hate_list.IsEntOnHateList(owner_)) {
hate_list.AddEntToHateList(owner_, 0, 0, false, true);
owner_->AddAutoXTarget(this); // this was being called on dead/out-of-zone clients
}
break;
}
#endif //BOTS
// if other is a merc, add the merc client to the hate list
if (other->IsMerc()) {
if (other->CastToMerc()->GetMercOwner() && other->CastToMerc()->GetMercOwner()->CastToClient()->GetFeigned()) {
AddFeignMemory(other->CastToMerc()->GetMercOwner()->CastToClient());
}
else {
if (!hate_list.IsEntOnHateList(other->CastToMerc()->GetMercOwner()))
hate_list.AddEntToHateList(other->CastToMerc()->GetMercOwner(), 0, 0, false, true);
// if mercs are reworked to include adding 'this' to owner's xtarget list, this should reflect bots code above
}
} //MERC
// then add pet owner if there's one
if (owner) { // Other is a pet, add him and it
// EverHood 6/12/06
// Can't add a feigned owner to hate list
if (owner->IsClient() && owner->CastToClient()->GetFeigned()) {
//they avoid hate due to feign death...
}
else {
// cb:2007-08-17
// owner must get on list, but he's not actually gained any hate yet
if (
!owner->GetSpecialAbility(IMMUNE_AGGRO) &&
!(this->GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && owner->IsClient()) &&
!(this->GetSpecialAbility(IMMUNE_AGGRO_NPC) && owner->IsNPC())
) {
if (owner->IsClient() && !CheckAggro(owner))
owner->CastToClient()->AddAutoXTarget(this);
hate_list.AddEntToHateList(owner, 0, 0, false, !iBuffTic);
}
}
}
if (mypet && !mypet->IsHeld() && !mypet->IsPetStop()) { // I have a pet, add other to it
if (
!mypet->IsFamiliar() &&
!mypet->GetSpecialAbility(IMMUNE_AGGRO) &&
!(mypet->GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && this->IsClient()) &&
!(mypet->GetSpecialAbility(IMMUNE_AGGRO_NPC) && this->IsNPC())
) {
mypet->hate_list.AddEntToHateList(other, 0, 0, bFrenzy);
}
}
else if (myowner) { // I am a pet, add other to owner if it's NPC/LD
if (
myowner->IsAIControlled() &&
!myowner->GetSpecialAbility(IMMUNE_AGGRO) &&
!(this->GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && myowner->IsClient()) &&
!(this->GetSpecialAbility(IMMUNE_AGGRO_NPC) && myowner->IsNPC())
) {
myowner->hate_list.AddEntToHateList(other, 0, 0, bFrenzy);
}
}
if (other->GetTempPetCount())
entity_list.AddTempPetsToHateList(other, this, bFrenzy);
if (!wasengaged) {
if (IsNPC() && other->IsClient() && other->CastToClient())
parse->EventNPC(EVENT_AGGRO, this->CastToNPC(), other, "", 0);
AI_Event_Engaged(other, iYellForHelp);
}
}
// this is called from Damage() when 'this' is attacked by 'other'.
// 'this' is the one being attacked
// 'other' is the attacker
// a damage shield causes damage (or healing) to whoever attacks the wearer
// a reverse ds causes damage to the wearer whenever it attacks someone
// given this, a reverse ds must be checked each time the wearer is attacking
// and not when they're attacked
//a damage shield on a spell is a negative value but on an item it's a positive value so add the spell value and subtract the item value to get the end ds value
void Mob::DamageShield(Mob* attacker, bool spell_ds) {
if (!attacker || this == attacker)
return;
int DS = 0;
int rev_ds = 0;
uint16 spellid = 0;
if (!spell_ds)
{
DS = spellbonuses.DamageShield;
rev_ds = attacker->spellbonuses.ReverseDamageShield;
if (spellbonuses.DamageShieldSpellID != 0 && spellbonuses.DamageShieldSpellID != SPELL_UNKNOWN)
spellid = spellbonuses.DamageShieldSpellID;
}
else {
DS = spellbonuses.SpellDamageShield + itembonuses.SpellDamageShield + aabonuses.SpellDamageShield;
rev_ds = 0;
// This ID returns "you are burned", seemed most appropriate for spell DS
spellid = 2166;
/*
Live Message - not yet used on emu
Feedback onto you "YOUR mind burns from TARGETS NAME's feedback for %i points of non-melee damage."
Feedback onto other "TARGETS NAME's mind burns from YOUR feedback for %i points of non-melee damage."
*/
}
if (DS == 0 && rev_ds == 0)
return;
LogCombat("Applying Damage Shield of value [{}] to [{}]", DS, attacker->GetName());
//invert DS... spells yield negative values for a true damage shield
if (DS < 0) {
if (!spell_ds) {
DS += aabonuses.DamageShield; //Live AA - coat of thistles. (negative value)
DS -= itembonuses.DamageShield; //+Damage Shield should only work when you already have a DS spell
DS -= attacker->aabonuses.DS_Mitigation_Amount + attacker->itembonuses.DS_Mitigation_Amount + attacker->spellbonuses.DS_Mitigation_Amount; //Negative value to reduce
//Do not allow flat amount reductions to reduce past 0.
if (DS >= 0)
return;
//Spell data for damage shield mitigation shows a negative value for spells for clients and positive
//value for spells that effect pets. Unclear as to why. For now will convert all positive to be consistent.
if (attacker->IsOffHandAtk()) {
int32 mitigation = attacker->itembonuses.DSMitigationOffHand +
attacker->spellbonuses.DSMitigationOffHand +
attacker->aabonuses.DSMitigationOffHand;
DS -= DS*mitigation / 100;
}
int ds_mitigation = attacker->itembonuses.DSMitigation;
// Subtract mitigations because DS_Mitigation_Percentage is a negative value when reducing total, thus final value will be positive
ds_mitigation -= attacker->aabonuses.DS_Mitigation_Percentage + attacker->itembonuses.DS_Mitigation_Percentage + attacker->spellbonuses.DS_Mitigation_Percentage; //Negative value to reduce
DS -= DS * ds_mitigation / 100;
}
attacker->Damage(this, -DS, spellid, EQ::skills::SkillAbjuration/*hackish*/, false);
//we can assume there is a spell now
auto outapp = new EQApplicationPacket(OP_Damage, sizeof(CombatDamage_Struct));
CombatDamage_Struct* cds = (CombatDamage_Struct*)outapp->pBuffer;
cds->target = attacker->GetID();
cds->source = GetID();
cds->type = spellbonuses.DamageShieldType;
cds->spellid = 0x0;
cds->damage = DS;
entity_list.QueueCloseClients(this, outapp);
safe_delete(outapp);
}
else if (DS > 0 && !spell_ds) {
//we are healing the attacker...
attacker->HealDamage(DS);
//TODO: send a packet???
}
//Reverse DS
//this is basically a DS, but the spell is on the attacker, not the attackee
//if we've gotten to this point, we know "attacker" hit "this" (us) for damage & we aren't invulnerable
uint16 rev_ds_spell_id = SPELL_UNKNOWN;
if (spellbonuses.ReverseDamageShieldSpellID != 0 && spellbonuses.ReverseDamageShieldSpellID != SPELL_UNKNOWN)
rev_ds_spell_id = spellbonuses.ReverseDamageShieldSpellID;
if (rev_ds < 0) {
LogCombat("Applying Reverse Damage Shield of value [{}] to [{}]", rev_ds, attacker->GetName());
attacker->Damage(this, -rev_ds, rev_ds_spell_id, EQ::skills::SkillAbjuration/*hackish*/, false); //"this" (us) will get the hate, etc. not sure how this works on Live, but it'll work for now, and tanks will love us for this
//do we need to send a damage packet here also?
}
}
uint8 Mob::GetWeaponDamageBonus(const EQ::ItemData *weapon, bool offhand)
{
// dev quote with old and new formulas
// https://forums.daybreakgames.com/eq/index.php?threads/test-update-09-17-15.226618/page-5#post-3326194
//
// We assume that the level check is done before calling this function and sinister strikes is checked before
// calling for offhand DB
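// Illustrative example of the mainhand 1H formula below (integer math): a level 60 swinging a
// 45-delay one-hander falls into the 'delay >= 45' branch, giving
// 4 + ((60 - 28) / 3) + ((45 - 40) / 3) = 4 + 10 + 1 = a damage bonus of 15.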
auto level = GetLevel();
if (!weapon)
return 1 + ((level - 28) / 3); // how does weaponless scale?
auto delay = weapon->Delay;
if (weapon->IsType1HWeapon() || weapon->ItemType == EQ::item::ItemTypeMartial) {
// we assume sinister strikes is checked before calling here
if (!offhand) {
if (delay <= 39)
return 1 + ((level - 28) / 3);
else if (delay < 43)
return 2 + ((level - 28) / 3) + ((delay - 40) / 3);
else if (delay < 45)
return 3 + ((level - 28) / 3) + ((delay - 40) / 3);
else if (delay >= 45)
return 4 + ((level - 28) / 3) + ((delay - 40) / 3);
}
else {
return 1 + ((level - 40) / 3) * (delay / 30); // offhand bonus scales very poorly; of little value for the AA spent
}
}
else {
// 2h damage bonus
int damage_bonus = 1 + (level - 28) / 3;
if (delay <= 27)
return damage_bonus + 1;
// Client isn't reflecting what the dev quoted, this matches better
if (level > 29) {
int level_bonus = (level - 30) / 5 + 1;
if (level > 50) {
level_bonus++;
int level_bonus2 = level - 50;
if (level > 67)
level_bonus2 += 5;
else if (level > 59)
level_bonus2 += 4;
else if (level > 58)
level_bonus2 += 3;
else if (level > 56)
level_bonus2 += 2;
else if (level > 54)
level_bonus2++;
level_bonus += level_bonus2 * delay / 40;
}
damage_bonus += level_bonus;
}
if (delay >= 40) {
int delay_bonus = (delay - 40) / 3 + 1;
if (delay >= 45)
delay_bonus += 2;
else if (delay >= 43)
delay_bonus++;
damage_bonus += delay_bonus;
}
return damage_bonus;
}
return 0;
}
int Mob::GetHandToHandDamage(void)
{
if (RuleB(Combat, UseRevampHandToHand)) {
// everyone uses this in the revamp!
int skill = GetSkill(EQ::skills::SkillHandtoHand);
int epic = 0;
if (IsClient() && CastToClient()->GetItemIDAt(12) == 10652 && GetLevel() > 46)
epic = 280;
if (epic > skill)
skill = epic;
return skill / 15 + 3;
}
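// Legacy (non-revamp) path: damage comes from the per-level lookup tables below. Index 0 is a
// 99 sentinel since the tables are indexed directly by (1-based) level.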
static uint8 mnk_dmg[] = { 99,
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, // 1-10
6, 6, 6, 6, 7, 7, 7, 7, 7, 8, // 11-20
8, 8, 8, 8, 9, 9, 9, 9, 9, 10, // 21-30
10, 10, 10, 10, 11, 11, 11, 11, 11, 12, // 31-40
12, 12, 12, 12, 13, 13, 13, 13, 13, 14, // 41-50
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, // 51-60
14, 14 }; // 61-62
static uint8 bst_dmg[] = { 99,
4, 4, 4, 4, 4, 5, 5, 5, 5, 5, // 1-10
5, 6, 6, 6, 6, 6, 6, 7, 7, 7, // 11-20
7, 7, 7, 8, 8, 8, 8, 8, 8, 9, // 21-30
9, 9, 9, 9, 9, 10, 10, 10, 10, 10, // 31-40
10, 11, 11, 11, 11, 11, 11, 12, 12 }; // 41-49
if (GetClass() == MONK) {
if (IsClient() && CastToClient()->GetItemIDAt(12) == 10652 && GetLevel() > 50)
return 9;
if (level > 62)
return 15;
return mnk_dmg[level];
}
else if (GetClass() == BEASTLORD) {
if (level > 49)
return 13;
return bst_dmg[level];
}
return 2;
}
int Mob::GetHandToHandDelay(void)
{
if (RuleB(Combat, UseRevampHandToHand)) {
// everyone uses this in the revamp!
int skill = GetSkill(EQ::skills::SkillHandtoHand);
int epic = 0;
int iksar = 0;
if (IsClient() && CastToClient()->GetItemIDAt(12) == 10652 && GetLevel() > 46)
epic = 280;
else if (GetRace() == IKSAR)
iksar = 1;
// the delay bonus from the monk epic scales up to a skill of 280
if (epic >= skill)
epic = skill;
return iksar - epic / 21 + 38;
}
int delay = 35;
static uint8 mnk_hum_delay[] = { 99,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 1-10
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 11-20
35, 35, 35, 35, 35, 35, 35, 34, 34, 34, // 21-30
34, 33, 33, 33, 33, 32, 32, 32, 32, 31, // 31-40
31, 31, 31, 30, 30, 30, 30, 29, 29, 29, // 41-50
29, 28, 28, 28, 28, 27, 27, 27, 27, 26, // 51-60
24, 22 }; // 61-62
static uint8 mnk_iks_delay[] = { 99,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 1-10
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 11-20
35, 35, 35, 35, 35, 35, 35, 35, 35, 34, // 21-30
34, 34, 34, 34, 34, 33, 33, 33, 33, 33, // 31-40
33, 32, 32, 32, 32, 32, 32, 31, 31, 31, // 41-50
31, 31, 31, 30, 30, 30, 30, 30, 30, 29, // 51-60
25, 23 }; // 61-62
static uint8 bst_delay[] = { 99,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 1-10
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, // 11-20
35, 35, 35, 35, 35, 35, 35, 35, 34, 34, // 21-30
34, 34, 34, 33, 33, 33, 33, 33, 32, 32, // 31-40
32, 32, 32, 31, 31, 31, 31, 31, 30, 30, // 41-50
30, 30, 30, 29, 29, 29, 29, 29, 28, 28, // 51-60
28, 28, 28, 27, 27, 27, 27, 27, 26, 26, // 61-70
26, 26, 26 }; // 71-73
if (GetClass() == MONK) {
// Have a look to see if we have epic fists on
if (IsClient() && CastToClient()->GetItemIDAt(12) == 10652 && GetLevel() > 50)
return 16;
int level = GetLevel();
if (level > 62)
return GetRace() == IKSAR ? 21 : 20;
return GetRace() == IKSAR ? mnk_iks_delay[level] : mnk_hum_delay[level];
}
else if (GetClass() == BEASTLORD) {
int level = GetLevel();
if (level > 73)
return 25;
return bst_delay[level];
}
return 35;
}
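// Mob::ReduceDamage - applies melee damage absorption in order: negate-attack runes,
// melee threshold guard (only above its minimum damage trigger), partial melee mitigation
// runes, then a regular rune. Returns DMG_RUNE if the hit was fully absorbed.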
int32 Mob::ReduceDamage(int32 damage)
{
if (damage <= 0)
return damage;
int32 slot = -1;
bool DisableMeleeRune = false;
if (spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_EXISTS]) {
slot = spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_BUFFSLOT];
if (slot >= 0) {
if (--buffs[slot].hit_number == 0) {
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot, true);
}
if (spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT] && (damage > spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT]))
damage -= spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT];
else
return DMG_RUNE;
}
}
//Only mitigate if damage is above the minimium specified.
if (spellbonuses.MeleeThresholdGuard[SBIndex::THRESHOLDGUARD_MITIGATION_PERCENT]) {
slot = spellbonuses.MeleeThresholdGuard[SBIndex::THRESHOLDGUARD_BUFFSLOT];
if (slot >= 0 && (damage > spellbonuses.MeleeThresholdGuard[SBIndex::THRESHOLDGUARD_MIN_DMG_TO_TRIGGER]))
{
DisableMeleeRune = true;
int damage_to_reduce = damage * spellbonuses.MeleeThresholdGuard[SBIndex::THRESHOLDGUARD_MITIGATION_PERCENT] / 100;
if (damage_to_reduce >= buffs[slot].melee_rune)
{
LogSpells("Mob::ReduceDamage SE_MeleeThresholdGuard [{}] damage negated, [{}] damage remaining, fading buff", damage_to_reduce, buffs[slot].melee_rune);
damage -= buffs[slot].melee_rune;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
else
{
LogSpells("Mob::ReduceDamage SE_MeleeThresholdGuard [{}] damage negated, [{}] damage remaining", damage_to_reduce, buffs[slot].melee_rune);
buffs[slot].melee_rune = (buffs[slot].melee_rune - damage_to_reduce);
damage -= damage_to_reduce;
}
}
}
if (spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_PERCENT] && !DisableMeleeRune) {
slot = spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_BUFFSLOT];
if (slot >= 0)
{
int damage_to_reduce = damage * spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_PERCENT] / 100;
if (spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT] && (damage_to_reduce > spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT]))
damage_to_reduce = spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT];
if (spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT] && (damage_to_reduce >= buffs[slot].melee_rune))
{
LogSpells("Mob::ReduceDamage SE_MitigateMeleeDamage [{}] damage negated, [{}] damage remaining, fading buff", damage_to_reduce, buffs[slot].melee_rune);
damage -= buffs[slot].melee_rune;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
else
{
LogSpells("Mob::ReduceDamage SE_MitigateMeleeDamage [{}] damage negated, [{}] damage remaining", damage_to_reduce, buffs[slot].melee_rune);
if (spellbonuses.MitigateMeleeRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT])
buffs[slot].melee_rune = (buffs[slot].melee_rune - damage_to_reduce);
damage -= damage_to_reduce;
}
}
}
if (damage < 1)
return DMG_RUNE;
if (spellbonuses.MeleeRune[SBIndex::RUNE_AMOUNT] && spellbonuses.MeleeRune[SBIndex::RUNE_BUFFSLOT] >= 0)
damage = RuneAbsorb(damage, SE_Rune);
if (damage < 1)
return DMG_RUNE;
return(damage);
}
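// Mob::AffectMagicalDamage - spell damage counterpart of ReduceDamage. DoT ticks use DoT
// shielding and DoT mitigation runes; direct damage uses spell shielding, the spell threshold
// guard, spell mitigation runes, then regular and magic-only runes. Returns 0 when the spell
// was fully absorbed.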
int32 Mob::AffectMagicalDamage(int32 damage, uint16 spell_id, const bool iBuffTic, Mob* attacker)
{
if (damage <= 0)
return damage;
bool DisableSpellRune = false;
int32 slot = -1;
// See if we block the spell outright first
if (!iBuffTic && spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_EXISTS]) {
slot = spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_BUFFSLOT];
if (slot >= 0) {
if (--buffs[slot].hit_number == 0) {
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot, true);
}
if (spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT] && (damage > spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT]))
damage -= spellbonuses.NegateAttacks[SBIndex::NEGATE_ATK_MAX_DMG_ABSORB_PER_HIT];
else
return 0;
}
}
// If this is a DoT, use DoT Shielding...
if (iBuffTic) {
damage -= (damage * itembonuses.DoTShielding / 100);
if (spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_PERCENT]) {
slot = spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_BUFFSLOT];
if (slot >= 0)
{
int damage_to_reduce = damage * spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_PERCENT] / 100;
if (spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT] && (damage_to_reduce > spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT]))
damage_to_reduce = spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT];
if (spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT] && (damage_to_reduce >= buffs[slot].dot_rune))
{
damage -= buffs[slot].dot_rune;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
else
{
if (spellbonuses.MitigateDotRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT])
buffs[slot].dot_rune = (buffs[slot].dot_rune - damage_to_reduce);
damage -= damage_to_reduce;
}
}
}
}
// This must be a DD then so lets apply Spell Shielding and runes.
else
{
// Reduce damage by the Spell Shielding first so that the runes don't take the raw damage.
damage -= (damage * itembonuses.SpellShield / 100);
//Only mitigate if damage is above the minimium specified.
if (spellbonuses.SpellThresholdGuard[SBIndex::THRESHOLDGUARD_MITIGATION_PERCENT]) {
slot = spellbonuses.SpellThresholdGuard[SBIndex::THRESHOLDGUARD_BUFFSLOT];
if (slot >= 0 && (damage > spellbonuses.SpellThresholdGuard[SBIndex::THRESHOLDGUARD_MIN_DMG_TO_TRIGGER]))
{
DisableSpellRune = true;
int damage_to_reduce = damage * spellbonuses.SpellThresholdGuard[SBIndex::THRESHOLDGUARD_MITIGATION_PERCENT] / 100;
if (damage_to_reduce >= buffs[slot].magic_rune)
{
damage -= buffs[slot].magic_rune;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
else
{
buffs[slot].magic_rune = (buffs[slot].magic_rune - damage_to_reduce);
damage -= damage_to_reduce;
}
}
}
// Do runes now.
if (spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_PERCENT] && !DisableSpellRune) {
slot = spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_BUFFSLOT];
if (slot >= 0)
{
int damage_to_reduce = damage * spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_PERCENT] / 100;
if (spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT] && (damage_to_reduce > spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT]))
damage_to_reduce = spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_MAX_DMG_ABSORB_PER_HIT];
if (spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT] && (damage_to_reduce >= buffs[slot].magic_rune))
{
LogSpells("Mob::ReduceDamage SE_MitigateSpellDamage [{}] damage negated, [{}] damage remaining, fading buff", damage_to_reduce, buffs[slot].magic_rune);
damage -= buffs[slot].magic_rune;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
else
{
LogSpells("Mob::ReduceDamage SE_MitigateMeleeDamage [{}] damage negated, [{}] damage remaining", damage_to_reduce, buffs[slot].magic_rune);
if (spellbonuses.MitigateSpellRune[SBIndex::MITIGATION_RUNE_MAX_HP_AMT])
buffs[slot].magic_rune = (buffs[slot].magic_rune - damage_to_reduce);
damage -= damage_to_reduce;
}
}
}
if (damage < 1)
return 0;
//Regular runes absorb spell damage (except dots) - Confirmed on live.
if (spellbonuses.MeleeRune[SBIndex::RUNE_AMOUNT] && spellbonuses.MeleeRune[SBIndex::RUNE_BUFFSLOT] >= 0)
damage = RuneAbsorb(damage, SE_Rune);
if (spellbonuses.AbsorbMagicAtt[SBIndex::RUNE_AMOUNT] && spellbonuses.AbsorbMagicAtt[SBIndex::RUNE_BUFFSLOT] >= 0)
damage = RuneAbsorb(damage, SE_AbsorbMagicAtt);
if (damage < 1)
return 0;
}
return damage;
}
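// Mob::ReduceAllDamage - final absorption layer applied to both melee and spell damage:
// mana-fed absorption (ManaAbsorbPercentDamage bonus) and endurance-fed absorption
// (EnduranceAbsorbPercentDamage bonus), then counts the hit against incoming-damage numhits.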
int32 Mob::ReduceAllDamage(int32 damage)
{
if (damage <= 0)
return damage;
if (spellbonuses.ManaAbsorbPercentDamage) {
int32 mana_reduced = damage * spellbonuses.ManaAbsorbPercentDamage / 100;
if (GetMana() >= mana_reduced) {
damage -= mana_reduced;
SetMana(GetMana() - mana_reduced);
TryTriggerOnCastRequirement();
}
}
if (spellbonuses.EnduranceAbsorbPercentDamage[SBIndex::ENDURANCE_ABSORD_MITIGIATION]) {
int32 damage_reduced = damage * spellbonuses.EnduranceAbsorbPercentDamage[SBIndex::ENDURANCE_ABSORD_MITIGIATION] / 10000; //If hit for 1000, at 10% then lower damage by 100;
int32 endurance_drain = damage_reduced * spellbonuses.EnduranceAbsorbPercentDamage[SBIndex::ENDURANCE_ABSORD_DRAIN_PER_HP] / 10000; //Reduce endurance by 0.05% per HP loss
if (endurance_drain < 1)
endurance_drain = 1;
if (IsClient() && CastToClient()->GetEndurance() >= endurance_drain) {
damage -= damage_reduced;
CastToClient()->SetEndurance(CastToClient()->GetEndurance() - endurance_drain);
TryTriggerOnCastRequirement();
}
}
CheckNumHitsRemaining(NumHit::IncomingDamage);
return(damage);
}
bool Mob::HasProcs() const
{
for (int i = 0; i < MAX_PROCS; i++) {
if (PermaProcs[i].spellID != SPELL_UNKNOWN || SpellProcs[i].spellID != SPELL_UNKNOWN) {
return true;
}
}
if (IsClient()) {
for (int i = 0; i < MAX_AA_PROCS; i += 4) {
if (aabonuses.SpellProc[i]) {
return true;
}
}
}
return false;
}
bool Mob::HasDefensiveProcs() const
{
for (int i = 0; i < MAX_PROCS; i++) {
if (DefensiveProcs[i].spellID != SPELL_UNKNOWN) {
return true;
}
}
if (IsClient()) {
for (int i = 0; i < MAX_AA_PROCS; i += 4) {
if (aabonuses.DefensiveProc[i]) {
return true;
}
}
}
return false;
}
bool Mob::HasSkillProcs() const
{
for (int i = 0; i < MAX_SKILL_PROCS; i++) {
if (spellbonuses.SkillProc[i] || itembonuses.SkillProc[i] || aabonuses.SkillProc[i])
return true;
}
return false;
}
bool Mob::HasSkillProcSuccess() const
{
for (int i = 0; i < MAX_SKILL_PROCS; i++) {
if (spellbonuses.SkillProcSuccess[i] || itembonuses.SkillProcSuccess[i] || aabonuses.SkillProcSuccess[i])
return true;
}
return false;
}
bool Mob::HasRangedProcs() const
{
for (int i = 0; i < MAX_PROCS; i++){
if (RangedProcs[i].spellID != SPELL_UNKNOWN) {
return true;
}
}
if (IsClient()) {
for (int i = 0; i < MAX_AA_PROCS; i += 4) {
if (aabonuses.RangedProc[i]) {
return true;
}
}
}
return false;
}
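// Client::CheckDoubleAttack - rolls 1-500 against (skill + level), with flat "give double
// attack" bonuses converted to skill points and percentage increases applied on top.
// Illustrative example: 245 skill at level 60 gives a base chance of 305/500 = 61% before bonuses.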
bool Client::CheckDoubleAttack()
{
int chance = 0;
int skill = GetSkill(EQ::skills::SkillDoubleAttack);
//Check for bonuses that give you a double attack chance regardless of skill (ie Bestial Frenzy/Harmonious Attack AA)
int bonusGiveDA = aabonuses.GiveDoubleAttack + spellbonuses.GiveDoubleAttack + itembonuses.GiveDoubleAttack;
if (skill > 0)
chance = skill + GetLevel();
else if (!bonusGiveDA)
return false;
if (bonusGiveDA)
chance += bonusGiveDA / 100.0f * 500; // convert to skill value
int per_inc = aabonuses.DoubleAttackChance + spellbonuses.DoubleAttackChance + itembonuses.DoubleAttackChance;
if (per_inc)
chance += chance * per_inc / 100;
return zone->random.Int(1, 500) <= chance;
}
// Admittedly these parses were short, but this check worked for 3 toons across multiple levels
// with varying triple attack skill (1-3% error at least)
bool Client::CheckTripleAttack()
{
int chance = GetSkill(EQ::skills::SkillTripleAttack);
if (chance < 1)
return false;
int inc = aabonuses.TripleAttackChance + spellbonuses.TripleAttackChance + itembonuses.TripleAttackChance;
chance = static_cast<int>(chance * (1 + inc / 100.0f));
chance = (chance * 100) / (chance + 800);
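// Illustrative conversion: a modified chance of 300 becomes (300 * 100) / (300 + 800) = 27,
// i.e. roughly a 27% roll on the d100 below.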
return zone->random.Int(1, 100) <= chance;
}
bool Client::CheckDoubleRangedAttack() {
int32 chance = spellbonuses.DoubleRangedAttack + itembonuses.DoubleRangedAttack + aabonuses.DoubleRangedAttack;
if (chance && zone->random.Roll(chance))
return true;
return false;
}
bool Mob::CheckDoubleAttack()
{
// Not 100% certain pets follow this or if it's just from pets not always
// having the same skills as most mobs
int chance = GetSkill(EQ::skills::SkillDoubleAttack);
if (GetLevel() > 35)
chance += GetLevel();
int per_inc = aabonuses.DoubleAttackChance + spellbonuses.DoubleAttackChance + itembonuses.DoubleAttackChance;
if (per_inc)
chance += chance * per_inc / 100;
return zone->random.Int(1, 500) <= chance;
}
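// Mob::CommonDamage - shared damage pipeline for NPCs and clients. Handles invulnerability and
// ranged-immunity overrides, damage shields, hate list additions, rune/shielding reduction,
// lifetap healing, pet reactions, HP updates and death, stun/interrupt checks from bash/kick,
// mez and root breaks, and finally builds and routes the OP_Damage packet (or the DoT damage
// message for buff ticks) with the appropriate client-side filters.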
void Mob::CommonDamage(Mob* attacker, int &damage, const uint16 spell_id, const EQ::skills::SkillType skill_used, bool &avoidable, const int8 buffslot, const bool iBuffTic, eSpecialAttacks special) {
// This method is called with skill_used=ABJURE for Damage Shield damage.
bool FromDamageShield = (skill_used == EQ::skills::SkillAbjuration);
bool ignore_invul = false;
if (IsValidSpell(spell_id))
ignore_invul = spell_id == SPELL_CAZIC_TOUCH || spells[spell_id].cast_not_standing;
if (!ignore_invul && (GetInvul() || DivineAura())) {
LogCombat("Avoiding [{}] damage due to invulnerability", damage);
damage = DMG_INVULNERABLE;
}
// this should actually happen MUCH sooner, need to investigate though -- good enough for now
if ((skill_used == EQ::skills::SkillArchery || skill_used == EQ::skills::SkillThrowing) && GetSpecialAbility(IMMUNE_RANGED_ATTACKS)) {
LogCombat("Avoiding [{}] damage due to IMMUNE_RANGED_ATTACKS", damage);
damage = DMG_INVULNERABLE;
}
if (spell_id != SPELL_UNKNOWN || attacker == nullptr)
avoidable = false;
// only apply DS if physical damage (no spell damage)
// damage shield calls this function with spell_id set, so its unavoidable
if (attacker && damage > 0 && spell_id == SPELL_UNKNOWN && skill_used != EQ::skills::SkillArchery && skill_used != EQ::skills::SkillThrowing) {
DamageShield(attacker);
}
if (spell_id == SPELL_UNKNOWN && skill_used) {
CheckNumHitsRemaining(NumHit::IncomingHitAttempts);
if (attacker)
attacker->CheckNumHitsRemaining(NumHit::OutgoingHitAttempts);
}
if (attacker) {
if (attacker->IsClient()) {
if (!RuleB(Combat, EXPFromDmgShield)) {
// Damage shield damage shouldn't count towards who gets EXP
if (!attacker->CastToClient()->GetFeigned() && !FromDamageShield)
AddToHateList(attacker, 0, damage, true, false, iBuffTic, spell_id);
}
else {
if (!attacker->CastToClient()->GetFeigned())
AddToHateList(attacker, 0, damage, true, false, iBuffTic, spell_id);
}
}
else
AddToHateList(attacker, 0, damage, true, false, iBuffTic, spell_id);
}
if (damage > 0) {
//if there is some damage being done and there's an attacker involved
if (attacker) {
// if spell is lifetap add hp to the caster
if (spell_id != SPELL_UNKNOWN && IsLifetapSpell(spell_id)) {
int healed = damage;
healed = RuleB(Spells, CompoundLifetapHeals) ? attacker->GetActSpellHealing(spell_id, healed) : healed;
LogCombat("Applying lifetap heal of [{}] to [{}]", healed, attacker->GetName());
attacker->HealDamage(healed);
//we used to do a message to the client, but its gone now.
// emote goes with every one ... even npcs
entity_list.MessageClose(this, true, RuleI(Range, SpellMessages), Chat::Emote, "%s beams a smile at %s", attacker->GetCleanName(), this->GetCleanName());
}
// If a client pet is damaged while sitting, stand, fix sit button,
// and remove sitting regen. Removes bug where client clicks sit
// during battle and gains pet hp-regen and bugs the sit button.
if (IsPet()) {
Mob *owner = this->GetOwner();
if (owner && owner->IsClient()) {
if (GetPetOrder() == SPO_Sit) {
SetPetOrder(SPO_Follow);
}
// fix GUI sit button to be unpressed and stop sitting regen
owner->CastToClient()->SetPetCommandState(PET_BUTTON_SIT, 0);
SetAppearance(eaStanding);
}
}
} //end `if there is some damage being done and there's an attacker involved`
Mob *pet = GetPet();
// pets that have GHold will never automatically add NPCs
// pets that have Hold and no Focus will add NPCs if they're engaged
// pets that have Hold and Focus will not add NPCs
if (
pet &&
!pet->IsFamiliar() &&
!pet->GetSpecialAbility(IMMUNE_AGGRO) &&
!pet->IsEngaged() &&
attacker &&
!(pet->GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && attacker->IsClient()) &&
!(pet->GetSpecialAbility(IMMUNE_AGGRO_NPC) && attacker->IsNPC()) &&
attacker != this &&
!attacker->IsCorpse() &&
!pet->IsGHeld() &&
!attacker->IsTrap()
) {
if (!pet->IsHeld()) {
LogAggro("Sending pet [{}] into battle due to attack", pet->GetName());
if (IsClient()) {
// if pet was sitting his new mode is follow
// following after the battle (live verified)
if (pet->GetPetOrder() == SPO_Sit) {
pet->SetPetOrder(SPO_Follow);
}
// fix GUI sit button to be unpressed and stop sitting regen
this->CastToClient()->SetPetCommandState(PET_BUTTON_SIT, 0);
pet->SetAppearance(eaStanding);
}
pet->AddToHateList(attacker, 1, 0, true, false, false, spell_id);
pet->SetTarget(attacker);
MessageString(Chat::NPCQuestSay, PET_ATTACKING, pet->GetCleanName(), attacker->GetCleanName());
}
}
//see if any runes want to reduce this damage
if (spell_id == SPELL_UNKNOWN) {
damage = ReduceDamage(damage);
LogCombat("Melee Damage reduced to [{}]", damage);
damage = ReduceAllDamage(damage);
TryTriggerThreshHold(damage, SE_TriggerMeleeThreshold, attacker);
if (skill_used)
CheckNumHitsRemaining(NumHit::IncomingHitSuccess);
}
else {
int32 origdmg = damage;
damage = AffectMagicalDamage(damage, spell_id, iBuffTic, attacker);
if (origdmg != damage && attacker && attacker->IsClient()) {
if (attacker->CastToClient()->GetFilter(FilterDamageShields) != FilterHide)
attacker->Message(Chat::Yellow, "The Spellshield absorbed %d of %d points of damage", origdmg - damage, origdmg);
}
if (damage == 0 && attacker && origdmg != damage && IsClient()) {
//Kayen: Probably need to add a filter for this - Not sure if this msg is correct but there should be a message for spell negate/runes.
Message(263, "%s tries to cast on YOU, but YOUR magical skin absorbs the spell.", attacker->GetCleanName());
}
damage = ReduceAllDamage(damage);
TryTriggerThreshHold(damage, SE_TriggerSpellThreshold, attacker);
}
if (IsClient() && CastToClient()->sneaking) {
CastToClient()->sneaking = false;
SendAppearancePacket(AT_Sneak, 0);
}
if (attacker && attacker->IsClient() && attacker->CastToClient()->sneaking) {
attacker->CastToClient()->sneaking = false;
attacker->SendAppearancePacket(AT_Sneak, 0);
}
//final damage has been determined.
SetHP(GetHP() - damage);
if (HasDied()) {
bool IsSaved = false;
if (TryDivineSave())
IsSaved = true;
if (!IsSaved && !TrySpellOnDeath()) {
SetHP(-500);
if (Death(attacker, damage, spell_id, skill_used)) {
return;
}
}
}
else {
if (GetHPRatio() < 16)
TryDeathSave();
}
TryTriggerOnCastRequirement();
//fade mez if we are mezzed
if (IsMezzed() && attacker) {
LogCombat("Breaking mez due to attack");
entity_list.MessageCloseString(
this, /* Sender */
true, /* Skip Sender */
RuleI(Range, SpellMessages),
Chat::SpellWornOff, /* 284 */
HAS_BEEN_AWAKENED, // %1 has been awakened by %2.
GetCleanName(), /* Message1 */
attacker->GetCleanName() /* Message2 */
);
BuffFadeByEffect(SE_Mez);
}
// broken up for readability
// This is based on what the client is doing
// We had a bunch of stuff like BaseImmunityLevel checks, which I think is supposed to just be for spells
// This is missing some merc checks, but those mostly just skipped the spell bonuses I think ...
bool can_stun = false;
int stunbash_chance = 0; // bonus
if (attacker) {
if (skill_used == EQ::skills::SkillBash) {
can_stun = true;
if (attacker->IsClient())
stunbash_chance = attacker->spellbonuses.StunBashChance +
attacker->itembonuses.StunBashChance +
attacker->aabonuses.StunBashChance;
}
else if (skill_used == EQ::skills::SkillKick &&
(attacker->GetLevel() > 55 || attacker->IsNPC()) && GetClass() == WARRIOR) {
can_stun = true;
}
if ((GetBaseRace() == OGRE || GetBaseRace() == OGGOK_CITIZEN) &&
!attacker->BehindMob(this, attacker->GetX(), attacker->GetY()))
can_stun = false;
if (GetSpecialAbility(UNSTUNABLE))
can_stun = false;
}
if (can_stun) {
int bashsave_roll = zone->random.Int(0, 100);
if (bashsave_roll > 98 || bashsave_roll > (55 - stunbash_chance)) {
// did stun -- roll other resists
// SE_FrontalStunResist description says any angle now a days
int stun_resist2 = spellbonuses.FrontalStunResist + itembonuses.FrontalStunResist +
aabonuses.FrontalStunResist;
if (zone->random.Int(1, 100) > stun_resist2) {
// stun resist 2 failed
// time to check SE_StunResist and mod2 stun resist
int stun_resist =
spellbonuses.StunResist + itembonuses.StunResist + aabonuses.StunResist;
if (zone->random.Int(0, 100) >= stun_resist) {
// did stun
// nothing else to check!
Stun(2000); // straight 2 seconds every time
}
else {
// stun resist passed!
if (IsClient())
MessageString(Chat::Stun, SHAKE_OFF_STUN);
}
}
else {
// stun resist 2 passed!
if (IsClient())
MessageString(Chat::Stun, AVOID_STUNNING_BLOW);
}
}
else {
// main stun failed -- extra interrupt roll
if (IsCasting() &&
!EQ::ValueWithin(casting_spell_id, 859, 1023)) // these spells are excluded
// 80% chance as coded (roll of 2-9 out of 0-9) -- stun immune won't reach this branch though :(
if (zone->random.Int(0, 9) > 1)
InterruptSpell();
}
}
if (spell_id != SPELL_UNKNOWN && !iBuffTic) {
//see if root will break
if (IsRooted() && !FromDamageShield) // neotoyko: only spells cancel root
TryRootFadeByDamage(buffslot, attacker);
}
else if (spell_id == SPELL_UNKNOWN)
{
//increment chances of interrupting
if (IsCasting()) { //melee hits increment the interrupt counter; regular spell damage shouldn't interrupt
attacked_count++;
LogCombat("Melee attack while casting. Attack count [{}]", attacked_count);
}
}
//send an HP update if we are hurt
if (GetHP() < GetMaxHP())
SendHPUpdate(); // the OP_Damage actually updates the client in these cases, so we skip the HP update for them
} //end `if damage was done`
//send damage packet...
if (!iBuffTic) { //buff ticks do not send damage, instead they just call SendHPUpdate(), which is done above
auto outapp = new EQApplicationPacket(OP_Damage, sizeof(CombatDamage_Struct));
CombatDamage_Struct* a = (CombatDamage_Struct*)outapp->pBuffer;
a->target = GetID();
if (attacker == nullptr)
a->source = 0;
else if (attacker->IsClient() && attacker->CastToClient()->GMHideMe())
a->source = 0;
else
a->source = attacker->GetID();
a->type = SkillDamageTypes[skill_used]; // was 0x1c
a->damage = damage;
a->spellid = spell_id;
if (special == eSpecialAttacks::AERampage)
a->special = 1;
else if (special == eSpecialAttacks::Rampage)
a->special = 2;
else
a->special = 0;
a->hit_heading = attacker ? attacker->GetHeading() : 0.0f;
if (RuleB(Combat, MeleePush) && damage > 0 && !IsRooted() &&
(IsClient() || zone->random.Roll(RuleI(Combat, MeleePushChance)))) {
a->force = EQ::skills::GetSkillMeleePushForce(skill_used);
if (IsNPC()) {
if (attacker->IsNPC())
a->force = 0.0f; // 2013 change that disabled NPC vs NPC push
else
a->force *= 0.10f; // force against NPCs is divided by 10 I guess? ex bash is 0.3, parsed 0.03 against an NPC
if (ForcedMovement == 0 && a->force != 0.0f && position_update_melee_push_timer.Check()) {
m_Delta.x += a->force * g_Math.FastSin(a->hit_heading);
m_Delta.y += a->force * g_Math.FastCos(a->hit_heading);
ForcedMovement = 3;
}
}
}
//Note: if players can become pets, they will not receive damage messages of their own
//this was done to simplify the code here (since we can only effectively skip one mob on queue)
eqFilterType filter;
Mob *skip = attacker;
if (attacker && attacker->GetOwnerID()) {
//attacker is a pet, let pet owners see their pet's damage
Mob* owner = attacker->GetOwner();
if (owner && owner->IsClient()) {
if (((spell_id != SPELL_UNKNOWN) || (FromDamageShield)) && damage>0) {
//special crap for spell damage, looks hackish to me
char val1[20] = { 0 };
owner->MessageString(Chat::NonMelee, OTHER_HIT_NONMELEE, GetCleanName(), ConvertArray(damage, val1));
}
else {
if (damage > 0) {
if (spell_id != SPELL_UNKNOWN)
filter = iBuffTic ? FilterDOT : FilterSpellDamage;
else
filter = FilterPetHits;
}
else if (damage == -5)
filter = FilterNone; //cant filter invulnerable
else
filter = FilterPetMisses;
if (!FromDamageShield)
owner->CastToClient()->QueuePacket(outapp, true, CLIENT_CONNECTED, filter);
}
}
skip = owner;
}
else {
//attacker is not a pet, send to the attacker
//if the attacker is a client, try them with the correct filter
if (attacker && attacker->IsClient()) {
if ((spell_id != SPELL_UNKNOWN || FromDamageShield) && damage > 0) {
//special crap for spell damage, looks hackish to me
char val1[20] = { 0 };
if (FromDamageShield) {
if (attacker->CastToClient()->GetFilter(FilterDamageShields) != FilterHide)
attacker->MessageString(Chat::DamageShield, OTHER_HIT_NONMELEE, GetCleanName(), ConvertArray(damage, val1));
}
else {
entity_list.MessageCloseString(
this, /* Sender */
true, /* Skip Sender */
RuleI(Range, SpellMessages),
Chat::NonMelee, /* 283 */
HIT_NON_MELEE, /* %1 hit %2 for %3 points of non-melee damage. */
attacker->GetCleanName(), /* Message1 */
GetCleanName(), /* Message2 */
ConvertArray(damage, val1) /* Message3 */
);
}
}
else {
if (damage > 0) {
if (spell_id != SPELL_UNKNOWN)
filter = iBuffTic ? FilterDOT : FilterSpellDamage;
else
filter = FilterNone; //cant filter our own hits
}
else if (damage == -5)
filter = FilterNone; //cant filter invulnerable
else
filter = FilterMyMisses;
attacker->CastToClient()->QueuePacket(outapp, true, CLIENT_CONNECTED, filter);
}
}
skip = attacker;
}
//send damage to all clients around except the specified skip mob (attacker or the attacker's owner) and ourself
if (damage > 0) {
if (spell_id != SPELL_UNKNOWN)
filter = iBuffTic ? FilterDOT : FilterSpellDamage;
else
filter = FilterOthersHit;
}
else if (damage == -5)
filter = FilterNone; //cant filter invulnerable
else
filter = FilterOthersMiss;
//make attacker (the attacker) send the packet so we can skip them and the owner
//this call will send the packet to `this` as well (using the wrong filter) (will not happen until PC charm works)
// If this is Damage Shield damage, the correct OP_Damage packets will be sent from Mob::DamageShield, so
// we don't send them here.
if (!FromDamageShield) {
entity_list.QueueCloseClients(
this, /* Sender */
outapp, /* packet */
true, /* Skip Sender */
RuleI(Range, SpellMessages),
skip, /* Skip this mob */
true, /* Packet ACK */
filter /* eqFilterType filter */
);
//send the damage to ourself if we are a client
if (IsClient()) {
//I dont think any filters apply to damage affecting us
CastToClient()->QueuePacket(outapp);
}
}
safe_delete(outapp);
}
else {
//else, it is a buff tic...
// So we can see our dot dmg like live shows it.
if (spell_id != SPELL_UNKNOWN && damage > 0 && attacker && attacker != this && attacker->IsClient()) {
//might filter on (attack_skill>200 && attack_skill<250), but I don't think we need it
attacker->FilteredMessageString(attacker, Chat::DotDamage, FilterDOT,
YOUR_HIT_DOT, GetCleanName(), itoa(damage), spells[spell_id].name);
/* older clients don't have the below String ID, but it will be filtered */
entity_list.FilteredMessageCloseString(
attacker, /* Sender */
true, /* Skip Sender */
RuleI(Range, SpellMessages),
Chat::DotDamage, /* Type: 325 */
FilterDOT, /* FilterType: 19 */
OTHER_HIT_DOT, /* MessageFormat: %1 has taken %2 damage from %3 by %4. */
GetCleanName(), /* Message1 */
itoa(damage), /* Message2 */
attacker->GetCleanName(), /* Message3 */
spells[spell_id].name /* Message4 */
);
}
} //end packet sending
}
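// Mob::HealDamage - applies a heal capped at max HP, emitting HoT or regular heal messages
// to the caster and target (only when the actual amount healed exceeds 100), then sends an HP update.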
void Mob::HealDamage(uint32 amount, Mob *caster, uint16 spell_id)
{
int32 maxhp = GetMaxHP();
int32 curhp = GetHP();
uint32 acthealed = 0;
if (amount > (maxhp - curhp))
acthealed = (maxhp - curhp);
else
acthealed = amount;
if (acthealed > 100) {
if (caster) {
if (IsBuffSpell(spell_id)) { // hots
// message to caster
if (caster->IsClient() && caster == this) {
if (caster->CastToClient()->ClientVersionBit() & EQ::versions::maskSoFAndLater)
FilteredMessageString(caster, Chat::NonMelee, FilterHealOverTime,
HOT_HEAL_SELF, itoa(acthealed), spells[spell_id].name);
else
FilteredMessageString(caster, Chat::NonMelee, FilterHealOverTime,
YOU_HEALED, GetCleanName(), itoa(acthealed));
}
else if (caster->IsClient() && caster != this) {
if (caster->CastToClient()->ClientVersionBit() & EQ::versions::maskSoFAndLater)
caster->FilteredMessageString(caster, Chat::NonMelee, FilterHealOverTime,
HOT_HEAL_OTHER, GetCleanName(), itoa(acthealed),
spells[spell_id].name);
else
caster->FilteredMessageString(caster, Chat::NonMelee, FilterHealOverTime,
YOU_HEAL, GetCleanName(), itoa(acthealed));
}
// message to target
if (IsClient() && caster != this) {
if (CastToClient()->ClientVersionBit() & EQ::versions::maskSoFAndLater)
FilteredMessageString(this, Chat::NonMelee, FilterHealOverTime,
HOT_HEALED_OTHER, caster->GetCleanName(),
itoa(acthealed), spells[spell_id].name);
else
FilteredMessageString(this, Chat::NonMelee, FilterHealOverTime,
YOU_HEALED, caster->GetCleanName(), itoa(acthealed));
}
}
else { // normal heals
FilteredMessageString(caster, Chat::NonMelee, FilterSpellDamage,
YOU_HEALED, caster->GetCleanName(), itoa(acthealed));
if (caster != this)
caster->FilteredMessageString(caster, Chat::NonMelee, FilterSpellDamage,
YOU_HEAL, GetCleanName(), itoa(acthealed));
}
}
else {
Message(Chat::NonMelee, "You have been healed for %d points of damage.", acthealed);
}
}
if (curhp < maxhp) {
if ((curhp + amount) > maxhp)
curhp = maxhp;
else
curhp += amount;
SetHP(curhp);
SendHPUpdate();
}
}
//proc chance includes proc bonus
float Mob::GetProcChances(float ProcBonus, uint16 hand)
{
int mydex = GetDEX();
float ProcChance = 0.0f;
uint32 weapon_speed = GetWeaponSpeedbyHand(hand);
if (RuleB(Combat, AdjustProcPerMinute)) {
ProcChance = (static_cast<float>(weapon_speed) *
RuleR(Combat, AvgProcsPerMinute) / 60000.0f); // compensate for weapon_speed being in ms
ProcBonus += static_cast<float>(mydex) * RuleR(Combat, ProcPerMinDexContrib);
ProcChance += ProcChance * ProcBonus / 100.0f;
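// Worked example (rule values are server-configurable; the 2.0 below is only illustrative):
// a 30-delay weapon (weapon_speed = 3000 ms) with AvgProcsPerMinute = 2.0 gives a base
// ProcChance of 3000 * 2.0 / 60000 = 0.10 per swing, before the DEX and SPA bonuses above.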
}
else {
ProcChance = RuleR(Combat, BaseProcChance) +
static_cast<float>(mydex) / RuleR(Combat, ProcDexDivideBy);
ProcChance += ProcChance * ProcBonus / 100.0f;
}
LogCombat("Proc chance [{}] ([{}] from bonuses)", ProcChance, ProcBonus);
return ProcChance;
}
float Mob::GetDefensiveProcChances(float &ProcBonus, float &ProcChance, uint16 hand, Mob* on) {
if (!on)
return ProcChance;
int myagi = on->GetAGI();
ProcBonus = 0;
ProcChance = 0;
uint32 weapon_speed = GetWeaponSpeedbyHand(hand);
ProcChance = (static_cast<float>(weapon_speed) * RuleR(Combat, AvgDefProcsPerMinute) / 60000.0f); // compensate for weapon_speed being in ms
ProcBonus += static_cast<float>(myagi) * RuleR(Combat, DefProcPerMinAgiContrib) / 100.0f;
ProcChance = ProcChance + (ProcChance * ProcBonus);
LogCombat("Defensive Proc chance [{}] ([{}] from bonuses)", ProcChance, ProcBonus);
return ProcChance;
}
// argument 'weapon' not used
void Mob::TryDefensiveProc(Mob *on, uint16 hand) {
if (!on) {
SetTarget(nullptr);
LogError("A null Mob object was passed to Mob::TryDefensiveProc for evaluation!");
return;
}
if (!HasDefensiveProcs()) {
return;
}
if (!on->HasDied() && on->GetHP() > 0) {
float ProcChance, ProcBonus;
on->GetDefensiveProcChances(ProcBonus, ProcChance, hand, this);
if (hand == EQ::invslot::slotSecondary) {
ProcChance /= 2;
}
int level_penalty = 0;
int level_diff = GetLevel() - on->GetLevel();
if (level_diff > 6) {//10% penalty per level if > 6 levels over target.
level_penalty = (level_diff - 6) * 10;
}
ProcChance -= ProcChance*level_penalty / 100;
if (ProcChance < 0) {
return;
}
//Spell Procs and Quest added procs
for (int i = 0; i < MAX_PROCS; i++) {
if (IsValidSpell(DefensiveProcs[i].spellID)) {
if (!IsProcLimitTimerActive(DefensiveProcs[i].base_spellID, DefensiveProcs[i].proc_reuse_time, SE_DefensiveProc)) {
float chance = ProcChance * (static_cast<float>(DefensiveProcs[i].chance) / 100.0f);
if (zone->random.Roll(chance)) {
ExecWeaponProc(nullptr, DefensiveProcs[i].spellID, on);
CheckNumHitsRemaining(NumHit::DefensiveSpellProcs, 0, DefensiveProcs[i].base_spellID);
SetProcLimitTimer(DefensiveProcs[i].base_spellID, DefensiveProcs[i].proc_reuse_time, SE_DefensiveProc);
}
}
}
}
//AA Procs
if (IsClient()){
for (int i = 0; i < MAX_AA_PROCS; i += 4) {
int32 aa_rank_id = aabonuses.DefensiveProc[i];
int32 aa_spell_id = aabonuses.DefensiveProc[i + 1];
int32 aa_proc_chance = 100 + aabonuses.DefensiveProc[i + 2];
uint32 aa_proc_reuse_timer = aabonuses.DefensiveProc[i + 3];
if (aa_rank_id) {
if (!IsProcLimitTimerActive(-aa_rank_id, aa_proc_reuse_timer, SE_DefensiveProc)) {
float chance = ProcChance * (static_cast<float>(aa_proc_chance) / 100.0f);
if (zone->random.Roll(chance) && IsValidSpell(aa_spell_id)) {
ExecWeaponProc(nullptr, aa_spell_id, on);
SetProcLimitTimer(-aa_rank_id, aa_proc_reuse_timer, SE_DefensiveProc);
}
}
}
}
}
}
}
void Mob::TryWeaponProc(const EQ::ItemInstance* weapon_g, Mob *on, uint16 hand) {
if (!on) {
SetTarget(nullptr);
LogError("A null Mob object was passed to Mob::TryWeaponProc for evaluation!");
return;
}
if (!IsAttackAllowed(on)) {
LogCombat("Preventing procing off of unattackable things");
return;
}
if (DivineAura()) {
LogCombat("Procs cancelled, Divine Aura is in effect");
return;
}
if (!weapon_g) {
TrySpellProc(nullptr, (const EQ::ItemData*)nullptr, on);
return;
}
if (!weapon_g->IsClassCommon()) {
TrySpellProc(nullptr, (const EQ::ItemData*)nullptr, on);
return;
}
// Innate + aug procs from weapons
// TODO: powersource procs -- powersource procs are on invis augs, so shouldn't need anything extra
TryWeaponProc(weapon_g, weapon_g->GetItem(), on, hand);
// Procs from Buffs and AA both melee and range
TrySpellProc(weapon_g, weapon_g->GetItem(), on, hand);
return;
}
void Mob::TryWeaponProc(const EQ::ItemInstance *inst, const EQ::ItemData *weapon, Mob *on, uint16 hand)
{
if (!on) {
return;
}
if (!weapon)
return;
uint16 skillinuse = 28; // default skill id; overwritten below via GetSkillByItemType
int ourlevel = GetLevel();
float ProcBonus = static_cast<float>(aabonuses.ProcChanceSPA +
spellbonuses.ProcChanceSPA + itembonuses.ProcChanceSPA);
ProcBonus += static_cast<float>(itembonuses.ProcChance) / 10.0f; // Combat Effects
float ProcChance = GetProcChances(ProcBonus, hand);
if (hand == EQ::invslot::slotSecondary)
ProcChance /= 2;
// Try innate proc on weapon
// We can proc once here, either weapon or one aug
bool proced = false; // silly bool to prevent augs from going if weapon does
skillinuse = GetSkillByItemType(weapon->ItemType);
if (weapon->Proc.Type == EQ::item::ItemEffectCombatProc && IsValidSpell(weapon->Proc.Effect)) {
float WPC = ProcChance * (100.0f + // Proc chance for this weapon
static_cast<float>(weapon->ProcRate)) / 100.0f;
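// e.g. a weapon ProcRate of 100 doubles the base ProcChance, while ProcRate 0 leaves it unchanged.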
if (zone->random.Roll(WPC)) { // 255 dex = 0.084 chance of proc. No idea what this number should be really.
if (weapon->Proc.Level2 > ourlevel) {
LogCombat("Tried to proc ([{}]), but our level ([{}]) is lower than required ([{}])",
weapon->Name, ourlevel, weapon->Proc.Level2);
if (IsPet()) {
Mob *own = GetOwner();
if (own)
own->MessageString(Chat::Red, PROC_PETTOOLOW);
}
else {
MessageString(Chat::Red, PROC_TOOLOW);
}
}
else {
LogCombat("Attacking weapon ([{}]) successfully procing spell [{}] ([{}] percent chance)", weapon->Name, weapon->Proc.Effect, WPC * 100);
ExecWeaponProc(inst, weapon->Proc.Effect, on);
proced = true;
}
}
}
//If OneProcPerWeapon is not enabled, we reset the try for that weapon regardless of if we procced or not.
//This is for some servers that may want to have as many procs triggering from weapons as possible in a single round.
if (!RuleB(Combat, OneProcPerWeapon))
proced = false;
if (!proced && inst) {
for (int r = EQ::invaug::SOCKET_BEGIN; r <= EQ::invaug::SOCKET_END; r++) {
const EQ::ItemInstance *aug_i = inst->GetAugment(r);
if (!aug_i) // no aug, try next slot!
continue;
const EQ::ItemData *aug = aug_i->GetItem();
if (!aug)
continue;
if (aug->Proc.Type == EQ::item::ItemEffectCombatProc && IsValidSpell(aug->Proc.Effect)) {
float APC = ProcChance * (100.0f + // Proc chance for this aug
static_cast<float>(aug->ProcRate)) / 100.0f;
if (zone->random.Roll(APC)) {
if (aug->Proc.Level2 > ourlevel) {
if (IsPet()) {
Mob *own = GetOwner();
if (own)
own->MessageString(Chat::Red, PROC_PETTOOLOW);
}
else {
MessageString(Chat::Red, PROC_TOOLOW);
}
}
else {
ExecWeaponProc(aug_i, aug->Proc.Effect, on);
if (RuleB(Combat, OneProcPerWeapon))
break;
}
}
}
}
}
// TODO: Powersource procs -- powersource procs are from augs so shouldn't need anything extra
return;
}
void Mob::TrySpellProc(const EQ::ItemInstance *inst, const EQ::ItemData *weapon, Mob *on, uint16 hand)
{
if (!on) {
return;
}
float ProcBonus = static_cast<float>(spellbonuses.SpellProcChance +
itembonuses.SpellProcChance + aabonuses.SpellProcChance);
float ProcChance = 0.0f;
ProcChance = GetProcChances(ProcBonus, hand);
if (hand == EQ::invslot::slotSecondary)
ProcChance /= 2;
bool rangedattk = false;
if (weapon && hand == EQ::invslot::slotRange) {
if (weapon->ItemType == EQ::item::ItemTypeArrow ||
weapon->ItemType == EQ::item::ItemTypeLargeThrowing ||
weapon->ItemType == EQ::item::ItemTypeSmallThrowing ||
weapon->ItemType == EQ::item::ItemTypeBow) {
rangedattk = true;
}
}
if (!weapon && hand == EQ::invslot::slotRange && GetSpecialAbility(SPECATK_RANGED_ATK))
rangedattk = true;
int16 poison_slot=-1;
for (uint32 i = 0; i < MAX_PROCS; i++) {
if (IsPet() && hand != EQ::invslot::slotPrimary) //Pets can only proc spell procs from their primary hand (i.e. beastlord pets)
continue; // If pets ever can proc from off hand, this will need to change
if (SpellProcs[i].base_spellID == POISON_PROC &&
(!weapon || weapon->ItemType != EQ::item::ItemType1HPiercing))
continue; // Old school poison will only proc with 1HP equipped.
// Not ranged
if (!rangedattk) {
// Perma procs (Not used for AA, they are handled below)
if (PermaProcs[i].spellID != SPELL_UNKNOWN) {
if (zone->random.Roll(PermaProcs[i].chance)) { // TODO: Do these get spell bonus?
LogCombat("Permanent proc [{}] procing spell [{}] ([{}] percent chance)", i, PermaProcs[i].spellID, PermaProcs[i].chance);
ExecWeaponProc(nullptr, PermaProcs[i].spellID, on);
}
else {
LogCombat("Permanent proc [{}] failed to proc [{}] ([{}] percent chance)", i, PermaProcs[i].spellID, PermaProcs[i].chance);
}
}
// Spell procs (buffs)
if (SpellProcs[i].spellID != SPELL_UNKNOWN) {
if (SpellProcs[i].base_spellID == POISON_PROC) {
poison_slot=i;
continue; // Process the poison proc last per @mackal
}
if (!IsProcLimitTimerActive(SpellProcs[i].base_spellID, SpellProcs[i].proc_reuse_time, SE_WeaponProc)) {
float chance = ProcChance * (static_cast<float>(SpellProcs[i].chance) / 100.0f);
if (zone->random.Roll(chance)) {
LogCombat("Spell proc [{}] procing spell [{}] ([{}] percent chance)", i, SpellProcs[i].spellID, chance);
SendBeginCast(SpellProcs[i].spellID, 0);
ExecWeaponProc(nullptr, SpellProcs[i].spellID, on, SpellProcs[i].level_override);
SetProcLimitTimer(SpellProcs[i].base_spellID, SpellProcs[i].proc_reuse_time, SE_WeaponProc);
CheckNumHitsRemaining(NumHit::OffensiveSpellProcs, 0, SpellProcs[i].base_spellID);
}
else {
LogCombat("Spell proc [{}] failed to proc [{}] ([{}] percent chance)", i, SpellProcs[i].spellID, chance);
}
}
}
}
else if (rangedattk) { // ranged only
// ranged spell procs (buffs)
if (RangedProcs[i].spellID != SPELL_UNKNOWN) {
if (!IsProcLimitTimerActive(RangedProcs[i].base_spellID, RangedProcs[i].proc_reuse_time, SE_RangedProc)) {
float chance = ProcChance * (static_cast<float>(RangedProcs[i].chance) / 100.0f);
if (zone->random.Roll(chance)) {
LogCombat("Ranged proc [{}] procing spell [{}] ([{}] percent chance)", i, RangedProcs[i].spellID, chance);
ExecWeaponProc(nullptr, RangedProcs[i].spellID, on);
CheckNumHitsRemaining(NumHit::OffensiveSpellProcs, 0, RangedProcs[i].base_spellID);
SetProcLimitTimer(RangedProcs[i].base_spellID, RangedProcs[i].proc_reuse_time, SE_RangedProc);
}
else {
LogCombat("Ranged proc [{}] failed to proc [{}] ([{}] percent chance)", i, RangedProcs[i].spellID, chance);
}
}
}
}
}
//AA Procs
if (IsClient()) {
for (int i = 0; i < MAX_AA_PROCS; i += 4) {
int32 aa_rank_id = 0;
int32 aa_spell_id = SPELL_UNKNOWN;
int32 aa_proc_chance = 100;
uint32 aa_proc_reuse_timer = 0;
int proc_type = 0; //used to determine which timer array is used.
if (!rangedattk) {
aa_rank_id = aabonuses.SpellProc[i];
aa_spell_id = aabonuses.SpellProc[i + 1];
aa_proc_chance += aabonuses.SpellProc[i + 2];
aa_proc_reuse_timer = aabonuses.SpellProc[i + 3];
proc_type = SE_WeaponProc;
}
else {
aa_rank_id = aabonuses.RangedProc[i];
aa_spell_id = aabonuses.RangedProc[i + 1];
aa_proc_chance += aabonuses.RangedProc[i + 2];
aa_proc_reuse_timer = aabonuses.RangedProc[i + 3];
proc_type = SE_RangedProc;
}
if (aa_rank_id) {
if (!IsProcLimitTimerActive(-aa_rank_id, aa_proc_reuse_timer, proc_type)) {
float chance = ProcChance * (static_cast<float>(aa_proc_chance) / 100.0f);
if (zone->random.Roll(chance) && IsValidSpell(aa_spell_id)) {
LogCombat("AA proc [{}] procing spell [{}] ([{}] percent chance)", aa_rank_id, aa_spell_id, chance);
ExecWeaponProc(nullptr, aa_spell_id, on);
SetProcLimitTimer(-aa_rank_id, aa_proc_reuse_timer, proc_type);
}
else {
LogCombat("AA proc [{}] failed to proc [{}] ([{}] percent chance)", aa_rank_id, aa_spell_id, chance);
}
}
}
}
}
if (poison_slot > -1) {
bool one_shot = !RuleB(Combat, UseExtendedPoisonProcs);
float chance = (one_shot) ? 100.0f : ProcChance * (static_cast<float>(SpellProcs[poison_slot].chance) / 100.0f);
uint16 spell_id = SpellProcs[poison_slot].spellID;
if (zone->random.Roll(chance)) {
LogCombat("Poison proc [{}] procing spell [{}] ([{}] percent chance)", poison_slot, spell_id, chance);
SendBeginCast(spell_id, 0);
ExecWeaponProc(nullptr, spell_id, on, SpellProcs[poison_slot].level_override);
if (one_shot) {
RemoveProcFromWeapon(spell_id);
}
}
}
if (HasSkillProcs() && hand != EQ::invslot::slotRange) { //We check ranged skill procs within the attack functions.
uint16 skillinuse = 28;
if (weapon)
skillinuse = GetSkillByItemType(weapon->ItemType);
TrySkillProc(on, skillinuse, 0, false, hand);
}
return;
}
void Mob::TryPetCriticalHit(Mob *defender, DamageHitInfo &hit)
{
if (hit.damage_done < 1)
return;
// Allows pets to perform critical hits.
// Each rank adds an additional 1% chance for any melee hit (primary, secondary, kick, bash, etc) to critical,
// dealing up to 63% more damage. http://www.magecompendium.com/aa-short-library.html
// appears to be 70% damage, unsure if changed or just bad info before
Mob *owner = nullptr;
int critChance = 0;
critChance += RuleI(Combat, PetBaseCritChance); // 0 by default
int critMod = 170;
if (IsPet())
owner = GetOwner();
else if ((IsNPC() && CastToNPC()->GetSwarmOwner()))
owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner());
else
return;
if (!owner)
return;
int CritPetChance =
owner->aabonuses.PetCriticalHit + owner->itembonuses.PetCriticalHit + owner->spellbonuses.PetCriticalHit;
if (CritPetChance || critChance)
// For pets use PetCriticalHit for base chance; pets do not innately critically hit without it
critChance += CritPetChance;
if (critChance > 0) {
if (zone->random.Roll(critChance)) {
critMod += GetCritDmgMod(hit.skill, owner);
hit.damage_done += 5;
hit.damage_done = (hit.damage_done * critMod) / 100;
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
CRITICAL_HIT, /* MessageFormat: %1 scores a critical hit! (%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
}
}
}
void Mob::TryCriticalHit(Mob *defender, DamageHitInfo &hit, ExtraAttackOptions *opts)
{
#ifdef LUA_EQEMU
bool ignoreDefault = false;
LuaParser::Instance()->TryCriticalHit(this, defender, hit, opts, ignoreDefault);
if (ignoreDefault) {
return;
}
#endif
if (hit.damage_done < 1 || !defender)
return;
// decided to branch this into its own function since it's going to be duplicating a lot of the
// code in here, but could lead to some confusion otherwise
if ((IsPet() && GetOwner()->IsClient()) || (IsNPC() && CastToNPC()->GetSwarmOwner())) {
TryPetCriticalHit(defender, hit);
return;
}
#ifdef BOTS
if (this->IsPet() && this->GetOwner() && this->GetOwner()->IsBot()) {
this->TryPetCriticalHit(defender, hit);
return;
}
#endif // BOTS
if (IsNPC() && !RuleB(Combat, NPCCanCrit))
return;
// 1: Try Slay Undead
if (defender->GetBodyType() == BT_Undead || defender->GetBodyType() == BT_SummonedUndead ||
defender->GetBodyType() == BT_Vampire) {
int SlayRateBonus = aabonuses.SlayUndead[SBIndex::SLAYUNDEAD_RATE_MOD] + itembonuses.SlayUndead[SBIndex::SLAYUNDEAD_RATE_MOD] + spellbonuses.SlayUndead[SBIndex::SLAYUNDEAD_RATE_MOD];
if (SlayRateBonus) {
float slayChance = static_cast<float>(SlayRateBonus) / 10000.0f;
if (zone->random.Roll(slayChance)) {
int SlayDmgBonus = std::max(
{aabonuses.SlayUndead[SBIndex::SLAYUNDEAD_DMG_MOD], itembonuses.SlayUndead[SBIndex::SLAYUNDEAD_DMG_MOD], spellbonuses.SlayUndead[SBIndex::SLAYUNDEAD_DMG_MOD] });
hit.damage_done = std::max(hit.damage_done, hit.base_damage) + 5;
hit.damage_done = (hit.damage_done * SlayDmgBonus) / 100;
/* Female */
if (GetGender() == 1) {
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
FEMALE_SLAYUNDEAD, /* MessageFormat: %1's holy blade cleanses her target!(%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
}
/* Males and Neuter */
else {
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
MALE_SLAYUNDEAD, /* MessageFormat: %1's holy blade cleanses his target!(%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
}
return;
}
}
}
// 2: Try Melee Critical
// a lot of good info: http://giline.versus.jp/shiden/damage_e.htm, http://giline.versus.jp/shiden/su.htm
// We either require an innate crit chance or some SPA 169 to crit
bool innate_crit = false;
int crit_chance = GetCriticalChanceBonus(hit.skill);
if ((GetClass() == WARRIOR || GetClass() == BERSERKER) && GetLevel() >= 12)
innate_crit = true;
else if (GetClass() == RANGER && GetLevel() >= 12 && hit.skill == EQ::skills::SkillArchery)
innate_crit = true;
else if (GetClass() == ROGUE && GetLevel() >= 12 && hit.skill == EQ::skills::SkillThrowing)
innate_crit = true;
// we have a chance to crit!
if (innate_crit || crit_chance) {
int difficulty = 0;
if (hit.skill == EQ::skills::SkillArchery)
difficulty = RuleI(Combat, ArcheryCritDifficulty);
else if (hit.skill == EQ::skills::SkillThrowing)
difficulty = RuleI(Combat, ThrowingCritDifficulty);
else
difficulty = RuleI(Combat, MeleeCritDifficulty);
int roll = zone->random.Int(1, difficulty);
int dex_bonus = GetDEX();
if (dex_bonus > 255)
dex_bonus = 255 + ((dex_bonus - 255) / 5);
dex_bonus += 45; // chances did not match live without a small boost
// so if we have an innate crit we have a better chance, except for ber throwing
if (!innate_crit || (GetClass() == BERSERKER && hit.skill == EQ::skills::SkillThrowing))
dex_bonus = dex_bonus * 3 / 5;
if (crit_chance)
dex_bonus += dex_bonus * crit_chance / 100;
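// Illustrative example (MeleeCritDifficulty is a server rule; 9000 below is only an assumption):
// a 255 DEX warrior with no SPA 169 bonus has dex_bonus = 255 + 45 = 300, so
// roll Int(1, 9000) < 300 works out to roughly a 3.3% critical rate.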
// check if we crited
if (roll < dex_bonus) {
// step 1: check for finishing blow
if (TryFinishingBlow(defender, hit.damage_done))
return;
// step 2: calculate damage
hit.damage_done = std::max(hit.damage_done, hit.base_damage) + 5;
int og_damage = hit.damage_done;
int crit_mod = 170 + GetCritDmgMod(hit.skill);
if (crit_mod < 100) {
crit_mod = 100;
}
hit.damage_done = hit.damage_done * crit_mod / 100;
LogCombat("Crit success roll [{}] dex chance [{}] og dmg [{}] crit_mod [{}] new dmg [{}]", roll, dex_bonus, og_damage, crit_mod, hit.damage_done);
// step 3: check deadly strike
if (GetClass() == ROGUE && hit.skill == EQ::skills::SkillThrowing) {
if (BehindMob(defender, GetX(), GetY())) {
int chance = GetLevel() * 12;
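// e.g. at level 60 this is 720 out of 1000, i.e. roughly a 72% chance per qualifying throw.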
if (zone->random.Int(1, 1000) < chance) {
// step 3a: check assassinate
int assdmg = TryAssassinate(defender, hit.skill); // I don't think this is right
if (assdmg) {
hit.damage_done = assdmg;
return;
}
hit.damage_done = hit.damage_done * 200 / 100;
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
DEADLY_STRIKE, /* MessageFormat: %1 scores a Deadly Strike!(%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
return;
}
}
}
// step 4: check crippling blow
// this SPA was reused on live ...
bool berserk = spellbonuses.BerserkSPA || itembonuses.BerserkSPA || aabonuses.BerserkSPA;
if (!berserk) {
if (zone->random.Roll(GetCrippBlowChance())) {
berserk = true;
} // TODO: Holyforge is supposed to have an innate extra undead chance? 1/5 which matches the SPA crip though ...
}
if (IsBerserk() || berserk) {
hit.damage_done += og_damage * 119 / 100;
LogCombat("Crip damage [{}]", hit.damage_done);
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
CRIPPLING_BLOW, /* MessageFormat: %1 lands a Crippling Blow!(%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
// Crippling blows also have a chance to stun
// Kayen: Crippling Blow would cause a chance to interrupt for npcs < 55, with a
// staggers message.
if (defender->GetLevel() <= 55 && !defender->GetSpecialAbility(UNSTUNABLE)) {
defender->Emote("staggers.");
defender->Stun(2000);
}
return;
}
/* Normal Critical hit message */
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
CRITICAL_HIT, /* MessageFormat: %1 scores a critical hit! (%2) */
GetCleanName(), /* Message1 */
itoa(hit.damage_done + hit.min_damage) /* Message2 */
);
}
}
}
bool Mob::TryFinishingBlow(Mob *defender, int &damage)
{
// base2 of FinishingBlowLvl is the HP limit (cur / max) * 1000, 10% is listed as 100
if (defender && !defender->IsClient() && defender->GetHPRatio() < 10) {
uint32 FB_Dmg =
aabonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_DMG] + spellbonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_DMG] + itembonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_DMG];
uint32 FB_Level = 0;
FB_Level = aabonuses.FinishingBlowLvl[SBIndex::FINISHING_EFFECT_LEVEL_MAX];
if (FB_Level < spellbonuses.FinishingBlowLvl[SBIndex::FINISHING_EFFECT_LEVEL_MAX])
FB_Level = spellbonuses.FinishingBlowLvl[SBIndex::FINISHING_EFFECT_LEVEL_MAX];
else if (FB_Level < itembonuses.FinishingBlowLvl[SBIndex::FINISHING_EFFECT_LEVEL_MAX])
FB_Level = itembonuses.FinishingBlowLvl[SBIndex::FINISHING_EFFECT_LEVEL_MAX];
// modern AA description says rank 1 (500) is 50% chance
int ProcChance =
aabonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_PROC_CHANCE] + spellbonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_PROC_CHANCE] + itembonuses.FinishingBlow[SBIndex::FINISHING_EFFECT_PROC_CHANCE];
if (FB_Level && FB_Dmg && (defender->GetLevel() <= FB_Level) &&
(ProcChance >= zone->random.Int(1, 1000))) {
/* Finishing Blow Critical Message */
entity_list.FilteredMessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, CriticalDamage),
Chat::MeleeCrit, /* Type: 301 */
FilterMeleeCrits, /* FilterType: 12 */
FINISHING_BLOW, /* MessageFormat: %1 scores a Finishing Blow!!) */
GetCleanName() /* Message1 */
);
damage = FB_Dmg;
return true;
}
}
return false;
}
void Mob::DoRiposte(Mob *defender)
{
LogCombat("Preforming a riposte");
if (!defender)
return;
// so ahhh the angle you can riposte is larger than the angle you can hit :P
if (!defender->IsFacingMob(this)) {
defender->MessageString(Chat::TooFarAway, CANT_SEE_TARGET);
return;
}
defender->Attack(this, EQ::invslot::slotPrimary, true);
if (HasDied())
return;
// this effect isn't used on live? See no AAs or spells
int32 DoubleRipChance = defender->aabonuses.DoubleRiposte + defender->spellbonuses.DoubleRiposte +
defender->itembonuses.DoubleRiposte;
if (DoubleRipChance && zone->random.Roll(DoubleRipChance)) {
LogCombat("Preforming a double riposted from SE_DoubleRiposte ([{}] percent chance)", DoubleRipChance);
defender->Attack(this, EQ::invslot::slotPrimary, true);
if (HasDied())
return;
}
DoubleRipChance = defender->aabonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_CHANCE] + defender->spellbonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_CHANCE] +
defender->itembonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_CHANCE];
// Live AA - Double Riposte
if (DoubleRipChance && zone->random.Roll(DoubleRipChance)) {
LogCombat("Preforming a double riposted from SE_GiveDoubleRiposte base1 == 0 ([{}] percent chance)", DoubleRipChance);
defender->Attack(this, EQ::invslot::slotPrimary, true);
if (HasDied())
return;
}
// Double Riposte effect, allows for a chance to do RIPOSTE with a skill specific special attack (ie Return Kick).
// Coded narrowly: Limit to one per client. Limit AA only. [1 = Skill Attack Chance, 2 = Skill]
DoubleRipChance = defender->aabonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_SKILL_ATK_CHANCE];
if (DoubleRipChance && zone->random.Roll(DoubleRipChance)) {
LogCombat("Preforming a return SPECIAL ATTACK ([{}] percent chance)", DoubleRipChance);
if (defender->GetClass() == MONK)
defender->MonkSpecialAttack(this, defender->aabonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_SKILL]);
else if (defender->IsClient()) // so yeah, even if you don't have the skill you can still do the attack :P (and we don't crash anymore)
defender->CastToClient()->DoClassAttacks(this, defender->aabonuses.GiveDoubleRiposte[SBIndex::DOUBLE_RIPOSTE_SKILL], true);
}
}
void Mob::ApplyMeleeDamageMods(uint16 skill, int &damage, Mob *defender, ExtraAttackOptions *opts)
{
int dmgbonusmod = 0;
dmgbonusmod += GetMeleeDamageMod_SE(skill);
dmgbonusmod += GetMeleeDmgPositionMod(defender);
if (opts)
dmgbonusmod += opts->melee_damage_bonus_flat;
if (defender) {
if (defender->IsClient() && defender->GetClass() == WARRIOR)
dmgbonusmod -= 5;
// 168 defensive
dmgbonusmod += (defender->spellbonuses.MeleeMitigationEffect +
defender->itembonuses.MeleeMitigationEffect +
defender->aabonuses.MeleeMitigationEffect);
}
damage += damage * dmgbonusmod / 100;
}
bool Mob::HasDied() {
bool Result = false;
int32 hp_below = 0;
hp_below = (GetDelayDeath() * -1);
if ((GetHP()) <= (hp_below))
Result = true;
return Result;
}
const DamageTable &Mob::GetDamageTable() const
{
static const DamageTable dmg_table[] = {
{ 210, 49, 105 }, // 1-50
{ 245, 35, 80 }, // 51
{ 245, 35, 80 }, // 52
{ 245, 35, 80 }, // 53
{ 245, 35, 80 }, // 54
{ 245, 35, 80 }, // 55
{ 265, 28, 70 }, // 56
{ 265, 28, 70 }, // 57
{ 265, 28, 70 }, // 58
{ 265, 28, 70 }, // 59
{ 285, 23, 65 }, // 60
{ 285, 23, 65 }, // 61
{ 285, 23, 65 }, // 62
{ 290, 21, 60 }, // 63
{ 290, 21, 60 }, // 64
{ 295, 19, 55 }, // 65
{ 295, 19, 55 }, // 66
{ 300, 19, 55 }, // 67
{ 300, 19, 55 }, // 68
{ 300, 19, 55 }, // 69
{ 305, 19, 55 }, // 70
{ 305, 19, 55 }, // 71
{ 310, 17, 50 }, // 72
{ 310, 17, 50 }, // 73
{ 310, 17, 50 }, // 74
{ 315, 17, 50 }, // 75
{ 315, 17, 50 }, // 76
{ 325, 17, 45 }, // 77
{ 325, 17, 45 }, // 78
{ 325, 17, 45 }, // 79
{ 335, 17, 45 }, // 80
{ 335, 17, 45 }, // 81
{ 345, 17, 45 }, // 82
{ 345, 17, 45 }, // 83
{ 345, 17, 45 }, // 84
{ 355, 17, 45 }, // 85
{ 355, 17, 45 }, // 86
{ 365, 17, 45 }, // 87
{ 365, 17, 45 }, // 88
{ 365, 17, 45 }, // 89
{ 375, 17, 45 }, // 90
{ 375, 17, 45 }, // 91
{ 380, 17, 45 }, // 92
{ 380, 17, 45 }, // 93
{ 380, 17, 45 }, // 94
{ 385, 17, 45 }, // 95
{ 385, 17, 45 }, // 96
{ 390, 17, 45 }, // 97
{ 390, 17, 45 }, // 98
{ 390, 17, 45 }, // 99
{ 395, 17, 45 }, // 100
{ 395, 17, 45 }, // 101
{ 400, 17, 45 }, // 102
{ 400, 17, 45 }, // 103
{ 400, 17, 45 }, // 104
{ 405, 17, 45 } // 105
};
static const DamageTable mnk_table[] = {
{ 220, 45, 100 }, // 1-50
{ 245, 35, 80 }, // 51
{ 245, 35, 80 }, // 52
{ 245, 35, 80 }, // 53
{ 245, 35, 80 }, // 54
{ 245, 35, 80 }, // 55
{ 285, 23, 65 }, // 56
{ 285, 23, 65 }, // 57
{ 285, 23, 65 }, // 58
{ 285, 23, 65 }, // 59
{ 290, 21, 60 }, // 60
{ 290, 21, 60 }, // 61
{ 290, 21, 60 }, // 62
{ 295, 19, 55 }, // 63
{ 295, 19, 55 }, // 64
{ 300, 17, 50 }, // 65
{ 300, 17, 50 }, // 66
{ 310, 17, 50 }, // 67
{ 310, 17, 50 }, // 68
{ 310, 17, 50 }, // 69
{ 320, 17, 50 }, // 70
{ 320, 17, 50 }, // 71
{ 325, 15, 45 }, // 72
{ 325, 15, 45 }, // 73
{ 325, 15, 45 }, // 74
{ 330, 15, 45 }, // 75
{ 330, 15, 45 }, // 76
{ 335, 15, 40 }, // 77
{ 335, 15, 40 }, // 78
{ 335, 15, 40 }, // 79
{ 345, 15, 40 }, // 80
{ 345, 15, 40 }, // 81
{ 355, 15, 40 }, // 82
{ 355, 15, 40 }, // 83
{ 355, 15, 40 }, // 84
{ 365, 15, 40 }, // 85
{ 365, 15, 40 }, // 86
{ 375, 15, 40 }, // 87
{ 375, 15, 40 }, // 88
{ 375, 15, 40 }, // 89
{ 385, 15, 40 }, // 90
{ 385, 15, 40 }, // 91
{ 390, 15, 40 }, // 92
{ 390, 15, 40 }, // 93
{ 390, 15, 40 }, // 94
{ 395, 15, 40 }, // 95
{ 395, 15, 40 }, // 96
{ 400, 15, 40 }, // 97
{ 400, 15, 40 }, // 98
{ 400, 15, 40 }, // 99
{ 405, 15, 40 }, // 100
{ 405, 15, 40 }, // 101
{ 410, 15, 40 }, // 102
{ 410, 15, 40 }, // 103
{ 410, 15, 40 }, // 104
{ 415, 15, 40 }, // 105
};
bool monk = GetClass() == MONK;
bool melee = IsWarriorClass();
// tables capped at 105 for now -- future proofed for a while at least :P
int level = std::min(static_cast<int>(GetLevel()), 105);
if (!melee || (!monk && level < 51))
return dmg_table[0];
if (monk && level < 51)
return mnk_table[0];
auto &which = monk ? mnk_table : dmg_table;
return which[level - 50];
}
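// How the table above is consumed (see ApplyDamageTable below): a successful Roll(damage_table.chance)
// skips the bonus entirely; otherwise basebonus = max(10, (hit.offense - minusfactor) / 2), a random
// extrapercent in [0, basebonus) is rolled, and the final multiplier is min(100 + extrapercent, max_extra) percent.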
void Mob::ApplyDamageTable(DamageHitInfo &hit)
{
#ifdef LUA_EQEMU
bool ignoreDefault = false;
LuaParser::Instance()->ApplyDamageTable(this, hit, ignoreDefault);
if (ignoreDefault) {
return;
}
#endif
// someone may want to add this to custom servers, can remove this if that's the case
if (!IsClient()
#ifdef BOTS
&& !IsBot()
#endif
)
return;
// this was parsed, but we do see the min of 10 and the normal minus factor is 105, so makes sense
if (hit.offense < 115)
return;
// things that come out to 1 dmg seem to skip this (ex non-bash slam classes)
if (hit.damage_done < 2)
return;
auto &damage_table = GetDamageTable();
if (zone->random.Roll(damage_table.chance))
return;
int basebonus = hit.offense - damage_table.minusfactor;
basebonus = std::max(10, basebonus / 2);
int extrapercent = zone->random.Roll0(basebonus);
int percent = std::min(100 + extrapercent, damage_table.max_extra);
hit.damage_done = (hit.damage_done * percent) / 100;
if (IsWarriorClass() && GetLevel() > 54)
hit.damage_done++;
Log(Logs::Detail, Logs::Attack, "Damage table applied %d (max %d)", percent, damage_table.max_extra);
}
void Mob::TrySkillProc(Mob *on, uint16 skill, uint16 ReuseTime, bool Success, uint16 hand, bool IsDefensive)
{
if (!on) {
SetTarget(nullptr);
LogError("A null Mob object was passed to Mob::TrySkillProc for evaluation!");
return;
}
if (!spellbonuses.LimitToSkill[skill] && !itembonuses.LimitToSkill[skill] && !aabonuses.LimitToSkill[skill])
return;
/*Allow one proc from each (Spell/Item/AA)
Kayen: Due to limited availability of effects on live it is too difficult
to confirm how they stack at this time, will adjust formula when more data is available to test.*/
bool CanProc = true;
uint16 base_spell_id = 0;
uint16 proc_spell_id = 0;
float ProcMod = 0;
float chance = 0;
if (IsDefensive)
chance = on->GetSkillProcChances(ReuseTime, hand);
else
chance = GetSkillProcChances(ReuseTime, hand);
if (spellbonuses.LimitToSkill[skill]) {
for (int e = 0; e < MAX_SKILL_PROCS; e++) {
if (CanProc &&
((!Success && spellbonuses.SkillProc[e] && IsValidSpell(spellbonuses.SkillProc[e]))
|| (Success && spellbonuses.SkillProcSuccess[e] && IsValidSpell(spellbonuses.SkillProcSuccess[e])))) {
if (Success)
base_spell_id = spellbonuses.SkillProcSuccess[e];
else
base_spell_id = spellbonuses.SkillProc[e];
proc_spell_id = 0;
ProcMod = 0;
for (int i = 0; i < EFFECT_COUNT; i++) {
if (spells[base_spell_id].effect_id[i] == SE_SkillProc || spells[base_spell_id].effect_id[i] == SE_SkillProcSuccess) {
proc_spell_id = spells[base_spell_id].base_value[i];
ProcMod = static_cast<float>(spells[base_spell_id].limit_value[i]);
}
else if (spells[base_spell_id].effect_id[i] == SE_LimitToSkill && spells[base_spell_id].base_value[i] <= EQ::skills::HIGHEST_SKILL) {
if (CanProc && spells[base_spell_id].base_value[i] == skill && IsValidSpell(proc_spell_id)) {
float final_chance = chance * (ProcMod / 100.0f);
if (zone->random.Roll(final_chance)) {
ExecWeaponProc(nullptr, proc_spell_id, on);
CheckNumHitsRemaining(NumHit::OffensiveSpellProcs, 0,
base_spell_id);
CanProc = false;
break;
}
}
}
else {
//Reset and check for proc in sequence
proc_spell_id = 0;
ProcMod = 0;
}
}
}
}
}
if (itembonuses.LimitToSkill[skill]) {
CanProc = true;
for (int e = 0; e < MAX_SKILL_PROCS; e++) {
if (CanProc &&
((!Success && itembonuses.SkillProc[e] && IsValidSpell(itembonuses.SkillProc[e]))
|| (Success && itembonuses.SkillProcSuccess[e] && IsValidSpell(itembonuses.SkillProcSuccess[e])))) {
if (Success)
base_spell_id = itembonuses.SkillProcSuccess[e];
else
base_spell_id = itembonuses.SkillProc[e];
proc_spell_id = 0;
ProcMod = 0;
for (int i = 0; i < EFFECT_COUNT; i++) {
if (spells[base_spell_id].effect_id[i] == SE_SkillProc || spells[base_spell_id].effect_id[i] == SE_SkillProcSuccess) {
proc_spell_id = spells[base_spell_id].base_value[i];
ProcMod = static_cast<float>(spells[base_spell_id].limit_value[i]);
}
else if (spells[base_spell_id].effect_id[i] == SE_LimitToSkill && spells[base_spell_id].base_value[i] <= EQ::skills::HIGHEST_SKILL) {
if (CanProc && spells[base_spell_id].base_value[i] == skill && IsValidSpell(proc_spell_id)) {
float final_chance = chance * (ProcMod / 100.0f);
if (zone->random.Roll(final_chance)) {
ExecWeaponProc(nullptr, proc_spell_id, on);
CanProc = false;
break;
}
}
}
else {
proc_spell_id = 0;
ProcMod = 0;
}
}
}
}
}
if (IsClient() && aabonuses.LimitToSkill[skill]) {
CanProc = true;
uint32 effect_id = 0;
int32 base_value = 0;
int32 limit_value = 0;
uint32 slot = 0;
for (int e = 0; e < MAX_SKILL_PROCS; e++) {
if (CanProc &&
((!Success && aabonuses.SkillProc[e])
|| (Success && aabonuses.SkillProcSuccess[e]))) {
int aaid = 0;
if (Success)
base_spell_id = aabonuses.SkillProcSuccess[e];
else
base_spell_id = aabonuses.SkillProc[e];
proc_spell_id = 0;
ProcMod = 0;
for (auto &rank_info : aa_ranks) {
auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(rank_info.first, rank_info.second.first);
auto ability = ability_rank.first;
auto rank = ability_rank.second;
if (!ability) {
continue;
}
for (auto &effect : rank->effects) {
effect_id = effect.effect_id;
base_value = effect.base_value;
limit_value = effect.limit_value;
slot = effect.slot;
if (effect_id == SE_SkillProc || effect_id == SE_SkillProcSuccess) {
proc_spell_id = base_value;
ProcMod = static_cast<float>(limit_value);
}
else if (effect_id == SE_LimitToSkill && base_value <= EQ::skills::HIGHEST_SKILL) {
if (CanProc && base_value == skill && IsValidSpell(proc_spell_id)) {
float final_chance = chance * (ProcMod / 100.0f);
if (zone->random.Roll(final_chance)) {
ExecWeaponProc(nullptr, proc_spell_id, on);
CanProc = false;
break;
}
}
}
else {
proc_spell_id = 0;
ProcMod = 0;
}
}
}
}
}
}
}
float Mob::GetSkillProcChances(uint16 ReuseTime, uint16 hand) {
uint32 weapon_speed;
float ProcChance = 0;
if (!ReuseTime && hand) {
weapon_speed = GetWeaponSpeedbyHand(hand);
ProcChance = static_cast<float>(weapon_speed) * (RuleR(Combat, AvgProcsPerMinute) / 60000.0f);
if (hand == EQ::invslot::slotSecondary)
ProcChance /= 2;
}
else
ProcChance = static_cast<float>(ReuseTime) * (RuleR(Combat, AvgProcsPerMinute) / 60000.0f);
return ProcChance;
}
bool Mob::TryRootFadeByDamage(int buffslot, Mob* attacker) {
/*Dev Quote 2010: http://forums.station.sony.com/eq/posts/list.m?topic_id=161443
The Viscid Roots AA does the following: Reduces the chance for root to break by X percent.
There is no distinction of any kind between the caster inflicted damage, or anyone
else's damage. There is also no distinction between Direct and DOT damage in the root code.
General Mechanics
- Check buffslot to make sure damage from a root does not cancel the root
- If multiple roots on target, always and only checks first root slot and if broken only removes that slot's root.
- Only roots on detrimental spells can be broken by damage.
- Root break chance values obtained from live parses.
*/
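// Illustrative numbers (RootBreakFromSpells is a server rule; the 20 below is only an assumption):
// with a base break chance of 20 and a RootBreakChance of 25 on the root buff,
// BreakChance = 20 - 20 * 25 / 100 = 15; an attacker 3-20 levels above the target then
// reduces that further to 15 * 60 / 100 = 9 percent per damage event.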
if (!attacker || !spellbonuses.Root[SBIndex::ROOT_EXISTS] || spellbonuses.Root[SBIndex::ROOT_BUFFSLOT] < 0)
return false;
if (IsDetrimentalSpell(spellbonuses.Root[SBIndex::ROOT_BUFFSLOT]) && spellbonuses.Root[SBIndex::ROOT_BUFFSLOT] != buffslot) {
int BreakChance = RuleI(Spells, RootBreakFromSpells);
BreakChance -= BreakChance * buffs[spellbonuses.Root[SBIndex::ROOT_BUFFSLOT]].RootBreakChance / 100;
int level_diff = attacker->GetLevel() - GetLevel();
//Use baseline if level difference <= 1 (ie. If target is (1) level less than you, or equal or greater level)
if (level_diff == 2)
BreakChance = (BreakChance * 80) / 100; //Decrease by 20%;
else if (level_diff >= 3 && level_diff <= 20)
BreakChance = (BreakChance * 60) / 100; //Decrease by 40%;
else if (level_diff > 21)
BreakChance = (BreakChance * 20) / 100; //Decrease by 80%;
if (BreakChance < 1)
BreakChance = 1;
if (zone->random.Roll(BreakChance)) {
if (!TryFadeEffect(spellbonuses.Root[SBIndex::ROOT_BUFFSLOT])) {
BuffFadeBySlot(spellbonuses.Root[SBIndex::ROOT_BUFFSLOT]);
LogCombat("Spell broke root! BreakChance percent chance");
return true;
}
}
}
LogCombat("Spell did not break root. BreakChance percent chance");
return false;
}
int32 Mob::RuneAbsorb(int32 damage, uint16 type)
{
uint32 buff_max = GetMaxTotalSlots();
if (type == SE_Rune) {
for (uint32 slot = 0; slot < buff_max; slot++) {
if (slot == spellbonuses.MeleeRune[SBIndex::RUNE_BUFFSLOT] && spellbonuses.MeleeRune[SBIndex::RUNE_AMOUNT] && buffs[slot].melee_rune && IsValidSpell(buffs[slot].spellid)) {
int melee_rune_left = buffs[slot].melee_rune;
if (melee_rune_left > damage)
{
melee_rune_left -= damage;
buffs[slot].melee_rune = melee_rune_left;
return -6;
}
else
{
if (melee_rune_left > 0)
damage -= melee_rune_left;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
}
}
}
else {
for (uint32 slot = 0; slot < buff_max; slot++) {
if (slot == spellbonuses.AbsorbMagicAtt[SBIndex::RUNE_BUFFSLOT] && spellbonuses.AbsorbMagicAtt[SBIndex::RUNE_AMOUNT] && buffs[slot].magic_rune && IsValidSpell(buffs[slot].spellid)) {
int magic_rune_left = buffs[slot].magic_rune;
if (magic_rune_left > damage)
{
magic_rune_left -= damage;
buffs[slot].magic_rune = magic_rune_left;
return 0;
}
else
{
if (magic_rune_left > 0)
damage -= magic_rune_left;
if (!TryFadeEffect(slot))
BuffFadeBySlot(slot);
}
}
}
}
return damage;
}
void Mob::CommonOutgoingHitSuccess(Mob* defender, DamageHitInfo &hit, ExtraAttackOptions *opts)
{
if (!defender)
return;
#ifdef LUA_EQEMU
bool ignoreDefault = false;
LuaParser::Instance()->CommonOutgoingHitSuccess(this, defender, hit, opts, ignoreDefault);
if (ignoreDefault) {
return;
}
#endif
// BER weren't parsing the halving
if (hit.skill == EQ::skills::SkillArchery ||
(hit.skill == EQ::skills::SkillThrowing && GetClass() != BERSERKER))
hit.damage_done /= 2;
if (hit.damage_done < 1)
hit.damage_done = 1;
if (hit.skill == EQ::skills::SkillArchery) {
int bonus = aabonuses.ArcheryDamageModifier + itembonuses.ArcheryDamageModifier + spellbonuses.ArcheryDamageModifier;
hit.damage_done += hit.damage_done * bonus / 100;
int headshot = TryHeadShot(defender, hit.skill);
if (headshot > 0) {
hit.damage_done = headshot;
}
else if (GetClass() == RANGER && GetLevel() > 50) { // no double dmg on headshot
if ((defender->IsNPC() && !defender->IsMoving() && !defender->IsRooted()) || !RuleB(Combat, ArcheryBonusRequiresStationary)) {
hit.damage_done *= 2;
MessageString(Chat::MeleeCrit, BOW_DOUBLE_DAMAGE);
}
}
}
int extra_mincap = 0;
int min_mod = hit.base_damage * GetMeleeMinDamageMod_SE(hit.skill) / 100;
if (hit.skill == EQ::skills::SkillBackstab) {
extra_mincap = GetLevel() < 7 ? 7 : GetLevel();
if (GetLevel() >= 60)
extra_mincap = GetLevel() * 2;
else if (GetLevel() > 50)
extra_mincap = GetLevel() * 3 / 2;
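// e.g. level 65: extra_mincap = 130; level 55: 82 (integer math); level 40: 40. Applied below as a damage floor.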
if (IsSpecialAttack(eSpecialAttacks::ChaoticStab)) {
hit.damage_done = extra_mincap;
}
else {
int ass = TryAssassinate(defender, hit.skill);
if (ass > 0)
hit.damage_done = ass;
}
}
else if (hit.skill == EQ::skills::SkillFrenzy && GetClass() == BERSERKER && GetLevel() > 50) {
extra_mincap = 4 * GetLevel() / 5;
}
// this has some weird ordering
// Seems the crit message is generated before some of them :P
// worn item +skill dmg, SPA 220, 418. Live has a normalized version that should be here too
hit.min_damage += GetSkillDmgAmt(hit.skill) + GetPositionalDmgAmt(defender);
// shielding mod2
if (defender->itembonuses.MeleeMitigation)
hit.min_damage -= hit.min_damage * defender->itembonuses.MeleeMitigation / 100;
ApplyMeleeDamageMods(hit.skill, hit.damage_done, defender, opts);
min_mod = std::max(min_mod, extra_mincap);
if (min_mod && hit.damage_done < min_mod) // SPA 186
hit.damage_done = min_mod;
TryCriticalHit(defender, hit, opts);
hit.damage_done += hit.min_damage;
if (IsClient()) {
int extra = 0;
switch (hit.skill) {
case EQ::skills::SkillThrowing:
case EQ::skills::SkillArchery:
extra = CastToClient()->GetHeroicDEX() / 10;
break;
default:
extra = CastToClient()->GetHeroicSTR() / 10;
break;
}
hit.damage_done += extra;
}
// this appears where they do special attack dmg mods
int spec_mod = 0;
if (IsSpecialAttack(eSpecialAttacks::Rampage)) {
int mod = GetSpecialAbilityParam(SPECATK_RAMPAGE, 2);
if (mod > 0)
spec_mod = mod;
if ((IsPet() || IsTempPet()) && IsPetOwnerClient()) {
//SE_PC_Pet_Rampage SPA 464 on pet, damage modifier
int spell_mod = spellbonuses.PC_Pet_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD] + itembonuses.PC_Pet_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD] + aabonuses.PC_Pet_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD];
if (spell_mod > spec_mod)
spec_mod = spell_mod;
}
}
else if (IsSpecialAttack(eSpecialAttacks::AERampage)) {
int mod = GetSpecialAbilityParam(SPECATK_AREA_RAMPAGE, 2);
if (mod > 0)
spec_mod = mod;
if ((IsPet() || IsTempPet()) && IsPetOwnerClient()) {
//SE_PC_Pet_AE_Rampage SPA 465 on pet, damage modifier
int spell_mod = spellbonuses.PC_Pet_AE_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD] + itembonuses.PC_Pet_AE_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD] + aabonuses.PC_Pet_AE_Rampage[SBIndex::PET_RAMPAGE_DMG_MOD];
if (spell_mod > spec_mod)
spec_mod = spell_mod;
}
}
if (spec_mod > 0)
hit.damage_done = (hit.damage_done * spec_mod) / 100;
int pct_damage_reduction = defender->GetSkillDmgTaken(hit.skill, opts) + defender->GetPositionalDmgTaken(this);
hit.damage_done += (hit.damage_done * pct_damage_reduction / 100) + (defender->GetFcDamageAmtIncoming(this, 0, true, hit.skill)) + defender->GetPositionalDmgTakenAmt(this);
if (defender->GetShielderID()) {
DoShieldDamageOnShielder(defender, hit.damage_done, hit.skill);
hit.damage_done -= hit.damage_done * defender->GetShieldTargetMitigation() / 100; //Default shielded takes 50 pct damage
}
CheckNumHitsRemaining(NumHit::OutgoingHitSuccess);
}
void Mob::DoShieldDamageOnShielder(Mob *shield_target, int hit_damage_done, EQ::skills::SkillType skillInUse)
{
if (!shield_target) {
return;
}
Mob *shielder = entity_list.GetMob(shield_target->GetShielderID());
if (!shielder) {
shield_target->SetShielderID(0);
shield_target->SetShieldTargetMitigation(0);
return;
}
if (shield_target->CalculateDistance(shielder->GetX(), shielder->GetY(), shielder->GetZ()) > static_cast<float>(shielder->GetMaxShielderDistance())) {
shielder->SetShieldTargetID(0);
shielder->SetShielderMitigation(0);
shielder->SetShielderMaxDistance(0);
shielder->shield_timer.Disable();
shield_target->SetShielderID(0);
shield_target->SetShieldTargetMitigation(0);
return; //Too far away, no message is given though.
}
int mitigation = shielder->GetShielderMitigation(); //Default shielder mitigates 25 pct of damage taken, this can be increased up to max 50 by equiping a shield item
if (shielder->IsClient() && shielder->HasShieldEquiped()) {
EQ::ItemInstance* inst = shielder->CastToClient()->GetInv().GetItem(EQ::invslot::slotSecondary);
if (inst) {
const EQ::ItemData* shield = inst->GetItem();
if (shield && shield->ItemType == EQ::item::ItemTypeShield) {
mitigation += shield->AC * 50 / 100; //1% increase per 2 AC
mitigation = std::min(50, mitigation); //50 pct max mitigation bonus from /shield
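// e.g. the base 25 mitigation plus a 40 AC shield (adds 40 * 50 / 100 = 20) gives 45;
// a 60 AC shield (adds 30) would be capped at the 50 percent maximum.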
}
}
}
hit_damage_done -= hit_damage_done * mitigation / 100;
shielder->Damage(this, hit_damage_done, SPELL_UNKNOWN, skillInUse, true, -1, false, m_specialattacks);
shielder->CheckNumHitsRemaining(NumHit::OutgoingHitSuccess);
}
void Mob::CommonBreakInvisibleFromCombat()
{
//break invis when you attack
if (invisible) {
LogCombat("Removing invisibility due to melee attack");
BuffFadeByEffect(SE_Invisibility);
BuffFadeByEffect(SE_Invisibility2);
invisible = false;
}
if (invisible_undead) {
LogCombat("Removing invisibility vs. undead due to melee attack");
BuffFadeByEffect(SE_InvisVsUndead);
BuffFadeByEffect(SE_InvisVsUndead2);
invisible_undead = false;
}
if (invisible_animals) {
LogCombat("Removing invisibility vs. animals due to melee attack");
BuffFadeByEffect(SE_InvisVsAnimals);
invisible_animals = false;
}
CancelSneakHide();
if (spellbonuses.NegateIfCombat)
BuffFadeByEffect(SE_NegateIfCombat);
hidden = false;
improved_hidden = false;
}
/* Dev quotes:
* Old formula
* Final delay = (Original Delay / (haste mod *.01f)) + ((Hundred Hands / 100) * Original Delay)
* New formula
* Final delay = (Original Delay / (haste mod *.01f)) + ((Hundred Hands / 1000) * (Original Delay / (haste mod *.01f))
* Base Delay 20 25 30 37
* Haste 2.25 2.25 2.25 2.25
* HHE (old) -17 -17 -17 -17
* Final Delay 5.488888889 6.861111111 8.233333333 10.15444444
*
* Base Delay 20 25 30 37
* Haste 2.25 2.25 2.25 2.25
* HHE (new) -383 -383 -383 -383
* Final Delay 5.484444444 6.855555556 8.226666667 10.14622222
*
* Difference -0.004444444 -0.005555556 -0.006666667 -0.008222222
*
* These times are in 10th of a second
*/
void Mob::SetAttackTimer()
{
attack_timer.SetAtTrigger(4000, true);
}
void Client::SetAttackTimer()
{
float haste_mod = GetHaste() * 0.01f;
int primary_speed = 0;
int secondary_speed = 0;
//default value for attack timer in case they have
//an invalid weapon equipped:
attack_timer.SetAtTrigger(4000, true);
Timer *TimerToUse = nullptr;
for (int i = EQ::invslot::slotRange; i <= EQ::invslot::slotSecondary; i++) {
//pick a timer
if (i == EQ::invslot::slotPrimary)
TimerToUse = &attack_timer;
else if (i == EQ::invslot::slotRange)
TimerToUse = &ranged_timer;
else if (i == EQ::invslot::slotSecondary)
TimerToUse = &attack_dw_timer;
else //invalid slot (hands will always hit this)
continue;
const EQ::ItemData *ItemToUse = nullptr;
//find our item
EQ::ItemInstance *ci = GetInv().GetItem(i);
if (ci)
ItemToUse = ci->GetItem();
//special offhand stuff
if (i == EQ::invslot::slotSecondary) {
//if we can't dual wield, skip it
if (!CanThisClassDualWield() || HasTwoHanderEquipped()) {
attack_dw_timer.Disable();
continue;
}
}
//see if we have a valid weapon
if (ItemToUse != nullptr) {
//check type and damage/delay
if (!ItemToUse->IsClassCommon()
|| ItemToUse->Damage == 0
|| ItemToUse->Delay == 0) {
//no weapon
ItemToUse = nullptr;
}
// Check to see if skill is valid
else if ((ItemToUse->ItemType > EQ::item::ItemTypeLargeThrowing) &&
(ItemToUse->ItemType != EQ::item::ItemTypeMartial) &&
(ItemToUse->ItemType != EQ::item::ItemType2HPiercing)) {
//no weapon
ItemToUse = nullptr;
}
}
int hhe = itembonuses.HundredHands + spellbonuses.HundredHands;
int speed = 0;
int delay = 3500;
//if we have no weapon..
if (ItemToUse == nullptr)
delay = 100 * GetHandToHandDelay();
else
//we have a weapon, use its delay
delay = 100 * ItemToUse->Delay;
speed = delay / haste_mod;
if (ItemToUse && ItemToUse->ItemType == EQ::item::ItemTypeBow) {
// Live actually had a bug here where they would return the non-modified attack speed
// rather than the cap ...
speed = std::max(speed - GetQuiverHaste(speed), RuleI(Combat, QuiverHasteCap));
}
else {
if (RuleB(Spells, Jun182014HundredHandsRevamp))
speed = static_cast<int>(speed + ((hhe / 1000.0f) * speed));
else
speed = static_cast<int>(speed + ((hhe / 100.0f) * delay));
}
TimerToUse->SetAtTrigger(std::max(RuleI(Combat, MinHastedDelay), speed), true, true);
if (i == EQ::invslot::slotPrimary) {
primary_speed = speed;
}
else if (i == EQ::invslot::slotSecondary) {
secondary_speed = speed;
}
}
//To allow for dual wield animation to display correctly if both weapons have same delay
if (primary_speed == secondary_speed) {
SetDualWieldingSameDelayWeapons(1);
}
else {
SetDualWieldingSameDelayWeapons(0);
}
}
void NPC::SetAttackTimer()
{
float haste_mod = GetHaste() * 0.01f;
//default value for attack timer in case they have
//an invalid weapon equipped:
attack_timer.SetAtTrigger(4000, true);
Timer *TimerToUse = nullptr;
int hhe = itembonuses.HundredHands + spellbonuses.HundredHands;
// Technically NPCs should do some logic for weapons, but the effect is minimal
// What they do is take the lower of their set delay and the weapon's
// ex. Mob's delay set to 20, weapon set to 19, delay 19
// Mob's delay set to 20, weapon set to 21, delay 20
int speed = 0;
if (RuleB(Spells, Jun182014HundredHandsRevamp))
speed = static_cast<int>((attack_delay / haste_mod) + ((hhe / 1000.0f) * (attack_delay / haste_mod)));
else
speed = static_cast<int>((attack_delay / haste_mod) + ((hhe / 100.0f) * attack_delay));
for (int i = EQ::invslot::slotRange; i <= EQ::invslot::slotSecondary; i++) {
//pick a timer
if (i == EQ::invslot::slotPrimary)
TimerToUse = &attack_timer;
else if (i == EQ::invslot::slotRange)
TimerToUse = &ranged_timer;
else if (i == EQ::invslot::slotSecondary)
TimerToUse = &attack_dw_timer;
else //invalid slot (hands will always hit this)
continue;
//special offhand stuff
if (i == EQ::invslot::slotSecondary) {
// SPECATK_QUAD is uncheesable
if (!CanThisClassDualWield() || (HasTwoHanderEquipped() && !GetSpecialAbility(SPECATK_QUAD))) {
attack_dw_timer.Disable();
continue;
}
}
TimerToUse->SetAtTrigger(std::max(RuleI(Combat, MinHastedDelay), speed), true, true);
}
}
void Client::DoAttackRounds(Mob *target, int hand, bool IsFromSpell)
{
if (!target)
return;
Attack(target, hand, false, false, IsFromSpell);
bool candouble = CanThisClassDoubleAttack();
// extra off hand non-sense, can only double with skill of 150 or above
// or you have any amount of GiveDoubleAttack
if (candouble && hand == EQ::invslot::slotSecondary)
candouble =
GetSkill(EQ::skills::SkillDoubleAttack) > 149 ||
(aabonuses.GiveDoubleAttack + spellbonuses.GiveDoubleAttack + itembonuses.GiveDoubleAttack) > 0;
if (candouble) {
CheckIncreaseSkill(EQ::skills::SkillDoubleAttack, target, -10);
if (CheckDoubleAttack()) {
Attack(target, hand, false, false, IsFromSpell);
if (hand == EQ::invslot::slotPrimary) {
if (HasTwoHanderEquipped()) {
auto extraattackchance = aabonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_CHANCE] + spellbonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_CHANCE] +
itembonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_CHANCE];
if (extraattackchance && zone->random.Roll(extraattackchance)) {
auto extraattackamt = std::max({aabonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_NUM_ATKS], spellbonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_NUM_ATKS], itembonuses.ExtraAttackChance[SBIndex::EXTRA_ATTACK_NUM_ATKS] });
for (int i = 0; i < extraattackamt; i++) {
Attack(target, hand, false, false, IsFromSpell);
}
}
}
else {
auto extraattackchance_primary = aabonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_CHANCE] + spellbonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_CHANCE] +
itembonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_CHANCE];
if (extraattackchance_primary && zone->random.Roll(extraattackchance_primary)) {
auto extraattackamt_primary = std::max({aabonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_NUM_ATKS], spellbonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_NUM_ATKS], itembonuses.ExtraAttackChancePrimary[SBIndex::EXTRA_ATTACK_NUM_ATKS] });
for (int i = 0; i < extraattackamt_primary; i++) {
Attack(target, hand, false, false, IsFromSpell);
}
}
}
}
if (hand == EQ::invslot::slotSecondary) {
auto extraattackchance_secondary = aabonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_CHANCE] + spellbonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_CHANCE] +
itembonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_CHANCE];
if (extraattackchance_secondary && zone->random.Roll(extraattackchance_secondary)) {
auto extraattackamt_secondary = std::max({aabonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_NUM_ATKS], spellbonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_NUM_ATKS], itembonuses.ExtraAttackChanceSecondary[SBIndex::EXTRA_ATTACK_NUM_ATKS] });
for (int i = 0; i < extraattackamt_secondary; i++) {
Attack(target, hand, false, false, IsFromSpell);
}
}
}
// you can only triple from the main hand
if (hand == EQ::invslot::slotPrimary && CanThisClassTripleAttack()) {
CheckIncreaseSkill(EQ::skills::SkillTripleAttack, target, -10);
if (CheckTripleAttack()) {
Attack(target, hand, false, false, IsFromSpell);
auto flurrychance = aabonuses.FlurryChance + spellbonuses.FlurryChance +
itembonuses.FlurryChance;
if (flurrychance && zone->random.Roll(flurrychance)) {
Attack(target, hand, false, false, IsFromSpell);
if (zone->random.Roll(flurrychance))
Attack(target, hand, false, false, IsFromSpell);
MessageString(Chat::NPCFlurry, YOU_FLURRY);
}
}
}
}
}
}
bool Mob::CheckDualWield()
{
// Pets /might/ follow a slightly different progression
// although it could all be from pets having different skills than most mobs
int chance = GetSkill(EQ::skills::SkillDualWield);
if (GetLevel() > 35)
chance += GetLevel();
chance += aabonuses.Ambidexterity + spellbonuses.Ambidexterity + itembonuses.Ambidexterity;
int per_inc = spellbonuses.DualWieldChance + aabonuses.DualWieldChance + itembonuses.DualWieldChance;
if (per_inc)
chance += chance * per_inc / 100;
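// The roll below is out of 375: e.g. a level 60 mob with Dual Wield skill 200 and no
// bonuses has chance = 200 + 60 = 260, i.e. roughly a 69% chance to dual wield.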
return zone->random.Int(1, 375) <= chance;
}
bool Client::CheckDualWield()
{
int chance = GetSkill(EQ::skills::SkillDualWield) + GetLevel();
chance += aabonuses.Ambidexterity + spellbonuses.Ambidexterity + itembonuses.Ambidexterity;
int per_inc = spellbonuses.DualWieldChance + aabonuses.DualWieldChance + itembonuses.DualWieldChance;
if (per_inc)
chance += chance * per_inc / 100;
return zone->random.Int(1, 375) <= chance;
}
void Mob::DoMainHandAttackRounds(Mob *target, ExtraAttackOptions *opts)
{
if (!target)
return;
if (RuleB(Combat, UseLiveCombatRounds)) {
// A "quad" on live really is just a successful dual wield where both double attack
// The mobs that could triple lost the ability to when the triple attack skill was added in
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
if (CanThisClassDoubleAttack() && CheckDoubleAttack()) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
if ((IsPet() || IsTempPet()) && IsPetOwnerClient()) {
int chance = spellbonuses.PC_Pet_Flurry + itembonuses.PC_Pet_Flurry + aabonuses.PC_Pet_Flurry;
if (chance && zone->random.Roll(chance))
Flurry(nullptr);
}
}
return;
}
if (IsNPC()) {
int16 n_atk = CastToNPC()->GetNumberOfAttacks();
if (n_atk <= 1) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
}
else {
for (int i = 0; i < n_atk; ++i) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
}
}
}
else {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
}
// we use this random value in three comparisons with different
// thresholds, and if it's truly random, then this should work
// out reasonably and will save us compute resources.
int32 RandRoll = zone->random.Int(0, 99);
if ((CanThisClassDoubleAttack() || GetSpecialAbility(SPECATK_TRIPLE) || GetSpecialAbility(SPECATK_QUAD))
// check double attack, these are NOT the same rules that clients use...
&&
RandRoll < (GetLevel() + NPCDualAttackModifier)) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
// lets see if we can do a triple attack with the main hand
// pets are excluded from triple and quads...
if ((GetSpecialAbility(SPECATK_TRIPLE) || GetSpecialAbility(SPECATK_QUAD)) && !IsPet() &&
RandRoll < (GetLevel() + NPCTripleAttackModifier)) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
// now lets check the quad attack
if (GetSpecialAbility(SPECATK_QUAD) && RandRoll < (GetLevel() + NPCQuadAttackModifier)) {
Attack(target, EQ::invslot::slotPrimary, false, false, false, opts);
}
}
}
}
void Mob::DoOffHandAttackRounds(Mob *target, ExtraAttackOptions *opts)
{
if (!target)
return;
// Mobs will only dual wield w/ the flag or have a secondary weapon
// For now, SPECATK_QUAD means innate DW when Combat:UseLiveCombatRounds is true
if ((GetSpecialAbility(SPECATK_INNATE_DW) ||
(RuleB(Combat, UseLiveCombatRounds) && GetSpecialAbility(SPECATK_QUAD))) ||
GetEquippedItemFromTextureSlot(EQ::textures::weaponSecondary) != 0) {
if (CheckDualWield()) {
Attack(target, EQ::invslot::slotSecondary, false, false, false, opts);
if (CanThisClassDoubleAttack() && GetLevel() > 35 && CheckDoubleAttack()) {
Attack(target, EQ::invslot::slotSecondary, false, false, false, opts);
if ((IsPet() || IsTempPet()) && IsPetOwnerClient()) {
int chance = spellbonuses.PC_Pet_Flurry + itembonuses.PC_Pet_Flurry + aabonuses.PC_Pet_Flurry;
if (chance && zone->random.Roll(chance))
Flurry(nullptr);
}
}
}
}
}
int Mob::GetPetAvoidanceBonusFromOwner()
{
Mob *owner = nullptr;
if (IsPet())
owner = GetOwner();
else if (IsNPC() && CastToNPC()->GetSwarmOwner())
owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner());
if (owner)
return owner->aabonuses.PetAvoidance + owner->spellbonuses.PetAvoidance + owner->itembonuses.PetAvoidance;
return 0;
}
int Mob::GetPetACBonusFromOwner()
{
Mob *owner = nullptr;
if (IsPet())
owner = GetOwner();
else if (IsNPC() && CastToNPC()->GetSwarmOwner())
owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner());
if (owner)
return owner->aabonuses.PetMeleeMitigation + owner->spellbonuses.PetMeleeMitigation + owner->itembonuses.PetMeleeMitigation;
return 0;
}
int Mob::GetPetATKBonusFromOwner()
{
Mob *owner = nullptr;
if (IsPet())
owner = GetOwner();
else if (IsNPC() && CastToNPC()->GetSwarmOwner())
owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner());
if (owner)
return owner->aabonuses.Pet_Add_Atk + owner->spellbonuses.Pet_Add_Atk + owner->itembonuses.Pet_Add_Atk;
return 0;
}
bool Mob::GetWasSpawnedInWater() const {
return spawned_in_water;
}
void Mob::SetSpawnedInWater(bool spawned_in_water) {
Mob::spawned_in_water = spawned_in_water;
}
int32 Mob::GetHPRegen() const
{
return hp_regen;
}
int32 Mob::GetManaRegen() const
{
return mana_regen;
}
| 1 | 11,029 | No possible chance for weird overflows here, right? | EQEmu-Server | cpp |
@@ -39,9 +39,13 @@ import org.apache.lucene.search.Weight;
* @lucene.experimental
*/
public abstract class ValueSourceScorer extends Scorer {
+ // Fixed cost for a single iteration of the TwoPhaseIterator instance
+ private static final int DEF_COST = 5;
+
protected final FunctionValues values;
private final TwoPhaseIterator twoPhaseIterator;
private final DocIdSetIterator disi;
+ private float externallyMutableCost;
protected ValueSourceScorer(Weight weight, LeafReaderContext readerContext, FunctionValues values) {
super(weight); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.queries.function;
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
/**
* {@link Scorer} which returns the result of {@link FunctionValues#floatVal(int)} as
* the score for a document, and which filters out documents that don't match {@link #matches(int)}.
* This Scorer has a {@link TwoPhaseIterator}. This is similar to {@link FunctionQuery},
* with an added filter.
* <p>
* Note: If the scores are needed, then the underlying value will probably be
* fetched/computed twice -- once to filter and next to return the score. If that's non-trivial then
* consider wrapping it in an implementation that will cache the current value.
* </p>
*
* @see FunctionQuery
* @lucene.experimental
*/
public abstract class ValueSourceScorer extends Scorer {
protected final FunctionValues values;
private final TwoPhaseIterator twoPhaseIterator;
private final DocIdSetIterator disi;
protected ValueSourceScorer(Weight weight, LeafReaderContext readerContext, FunctionValues values) {
super(weight);
this.values = values;
final DocIdSetIterator approximation = DocIdSetIterator.all(readerContext.reader().maxDoc()); // no approximation!
this.twoPhaseIterator = new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
return ValueSourceScorer.this.matches(approximation.docID());
}
@Override
public float matchCost() {
return 100; // TODO: use cost of ValueSourceScorer.this.matches()
}
};
this.disi = TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator);
}
/** Override to decide if this document matches. It's called by {@link TwoPhaseIterator#matches()}. */
public abstract boolean matches(int doc) throws IOException;
@Override
public DocIdSetIterator iterator() {
return disi;
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return twoPhaseIterator;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public float score() throws IOException {
// (same as FunctionQuery, but no qWeight) TODO consider adding configurable qWeight
float score = values.floatVal(disi.docID());
// Current Lucene priority queues can't handle NaN and -Infinity, so
// map to -Float.MAX_VALUE. This conditional handles both -infinity
// and NaN since comparisons with NaN are always false.
return score > Float.NEGATIVE_INFINITY ? score : -Float.MAX_VALUE;
}
@Override
public float getMaxScore(int upTo) throws IOException {
return Float.POSITIVE_INFINITY;
}
}
| 1 | 32,878 | Or we could use a Float object to more clearly show as user-settable via non-null? | apache-lucene-solr | java |
@@ -107,7 +107,7 @@ namespace Nethermind.Merge.Plugin.Handlers.V1
if (headUpdated && shouldUpdateHead)
{
- _poSSwitcher.ForkchoiceUpdated(newHeadBlock!.Header);
+ _poSSwitcher.ForkchoiceUpdated(newHeadBlock!.Header, finalizedHeader);
_stateProvider.ResetStateTo(newHeadBlock.StateRoot!);
if (_logger.IsInfo) _logger.Info($"Block {forkchoiceState.HeadBlockHash} was set as head");
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System;
using System.Collections.Generic;
using Nethermind.Blockchain;
using Nethermind.Blockchain.Find;
using Nethermind.Consensus;
using Nethermind.Consensus.Producers;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Facade.Eth;
using Nethermind.JsonRpc;
using Nethermind.Logging;
using Nethermind.Merge.Plugin.Data;
using Nethermind.Merge.Plugin.Data.V1;
using Nethermind.State;
namespace Nethermind.Merge.Plugin.Handlers.V1
{
/// <summary>
/// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md
/// Propagates the change in the fork choice to the execution client
/// </summary>
public class ForkchoiceUpdatedV1Handler : IForkchoiceUpdatedV1Handler
{
private readonly IBlockTree _blockTree;
private readonly IStateProvider _stateProvider;
private readonly IManualBlockFinalizationManager _manualBlockFinalizationManager;
private readonly IPoSSwitcher _poSSwitcher;
private readonly IEthSyncingInfo _ethSyncingInfo;
private readonly IBlockConfirmationManager _blockConfirmationManager;
private readonly IPayloadService _payloadService;
private readonly ILogger _logger;
public ForkchoiceUpdatedV1Handler(
IBlockTree blockTree,
IStateProvider stateProvider,
IManualBlockFinalizationManager manualBlockFinalizationManager,
IPoSSwitcher poSSwitcher,
IEthSyncingInfo ethSyncingInfo,
IBlockConfirmationManager blockConfirmationManager,
IPayloadService payloadService,
ILogManager logManager)
{
_blockTree = blockTree ?? throw new ArgumentNullException(nameof(blockTree));
_stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider));
_manualBlockFinalizationManager = manualBlockFinalizationManager ?? throw new ArgumentNullException(nameof(manualBlockFinalizationManager));
_poSSwitcher = poSSwitcher ?? throw new ArgumentNullException(nameof(poSSwitcher));
_ethSyncingInfo = ethSyncingInfo ?? throw new ArgumentNullException(nameof(ethSyncingInfo));
_blockConfirmationManager = blockConfirmationManager ?? throw new ArgumentNullException(nameof(blockConfirmationManager));
_payloadService = payloadService;
_logger = logManager.GetClassLogger();
}
public ResultWrapper<ForkchoiceUpdatedV1Result> Handle(ForkchoiceStateV1 forkchoiceState, PayloadAttributes? payloadAttributes)
{
// ToDo wait for final PostMerge sync
// if (_ethSyncingInfo.IsSyncing())
// {
// return ResultWrapper<ForkchoiceUpdatedV1Result>.Success(new ForkchoiceUpdatedV1Result() { Status = EngineStatus.Syncing});
// }
(BlockHeader? finalizedHeader, string? finalizationErrorMsg) = EnsureHeaderForFinalization(forkchoiceState.FinalizedBlockHash);
if (finalizationErrorMsg != null)
return ResultWrapper<ForkchoiceUpdatedV1Result>.Success(new ForkchoiceUpdatedV1Result() { Status = EngineStatus.Syncing}); // ToDo wait for final PostMerge sync
(BlockHeader? confirmedHeader, string? confirmationErrorMsg) = EnsureHeaderForConfirmation(forkchoiceState.SafeBlockHash);
if (confirmationErrorMsg != null)
return ResultWrapper<ForkchoiceUpdatedV1Result>.Success(new ForkchoiceUpdatedV1Result() { Status = EngineStatus.Syncing}); // ToDo wait for final PostMerge sync
(Block? newHeadBlock, Block[]? blocks, string? setHeadErrorMsg) = EnsureBlocksForSetHead(forkchoiceState.HeadBlockHash);
if (setHeadErrorMsg != null)
return ResultWrapper<ForkchoiceUpdatedV1Result>.Success(new ForkchoiceUpdatedV1Result() { Status = EngineStatus.Syncing}); // ToDo wait for final PostMerge sync
if (ShouldFinalize(forkchoiceState.FinalizedBlockHash))
_manualBlockFinalizationManager.MarkFinalized(newHeadBlock!.Header, finalizedHeader!);
else if (_manualBlockFinalizationManager.LastFinalizedHash != Keccak.Zero)
if (_logger.IsWarn) _logger.Warn($"Cannot finalize block. The current finalized block is: {_manualBlockFinalizationManager.LastFinalizedHash}, the requested hash: {forkchoiceState.FinalizedBlockHash}");
// In future safeBlockHash will be added to JSON-RPC
_blockConfirmationManager.Confirm(confirmedHeader!.Hash!);
byte[]? payloadId = null;
bool headUpdated = false;
bool shouldUpdateHead = blocks != null && _blockTree.Head != newHeadBlock;
if (shouldUpdateHead)
{
_blockTree.UpdateMainChain(blocks!, true, true);
headUpdated = _blockTree.Head == newHeadBlock;
}
if (headUpdated && shouldUpdateHead)
{
_poSSwitcher.ForkchoiceUpdated(newHeadBlock!.Header);
_stateProvider.ResetStateTo(newHeadBlock.StateRoot!);
if (_logger.IsInfo) _logger.Info($"Block {forkchoiceState.HeadBlockHash} was set as head");
}
else if (headUpdated == false && shouldUpdateHead)
{
// ToDo we should never have this case. Consult it with LR
if (_logger.IsWarn) _logger.Warn($"Block {forkchoiceState.FinalizedBlockHash} was not set as head.");
}
bool shouldStartPreparingPayload = payloadAttributes != null;
if (shouldStartPreparingPayload)
{
payloadId = _payloadService.StartPreparingPayload(newHeadBlock!.Header, payloadAttributes);
}
return ResultWrapper<ForkchoiceUpdatedV1Result>.Success(new ForkchoiceUpdatedV1Result() { PayloadId = payloadId?.ToHexString(true), Status = EngineStatus.Success});
}
private (BlockHeader? BlockHeader, string? ErrorMsg) EnsureHeaderForConfirmation(Keccak confirmedBlockHash)
{
string? errorMsg = null;
BlockHeader? blockHeader = _blockTree.FindHeader(confirmedBlockHash, BlockTreeLookupOptions.None);
if (blockHeader is null)
{
errorMsg = $"Block {confirmedBlockHash} not found for confirmation.";
if (_logger.IsWarn) _logger.Warn(errorMsg);
}
return (blockHeader, errorMsg);
}
private (Block? NewHeadBlock, Block[]? Blocks, string? ErrorMsg) EnsureBlocksForSetHead(Keccak headBlockHash)
{
string? errorMsg = null;
Block? headBlock = _blockTree.FindBlock(headBlockHash, BlockTreeLookupOptions.None);
if (headBlock == null)
{
errorMsg = $"Block {headBlockHash} cannot be found and it will not be set as head.";
if (_logger.IsWarn) _logger.Warn(errorMsg);
return (headBlock, null, errorMsg);
}
if (_blockTree.Head!.Hash == headBlockHash)
{
return (headBlock, null, errorMsg);
}
if (!TryGetBranch(headBlock, out Block[] branchOfBlocks))
{
errorMsg = $"Block's {headBlockHash} main chain predecessor cannot be found and it will not be set as head.";
if (_logger.IsWarn) _logger.Warn(errorMsg);
}
return (headBlock, branchOfBlocks, errorMsg);
}
private (BlockHeader? BlockHeader, string? ErrorMsg) EnsureHeaderForFinalization(Keccak finalizedBlockHash)
{
string? errorMsg = null;
BlockHeader? blockHeader = _blockTree.FindHeader(finalizedBlockHash, BlockTreeLookupOptions.None);
if (ShouldFinalize(finalizedBlockHash))
{
blockHeader = _blockTree.FindHeader(finalizedBlockHash, BlockTreeLookupOptions.None);
if (blockHeader is null)
{
errorMsg = $"Block {finalizedBlockHash} not found for finalization.";
if (_logger.IsWarn) _logger.Warn(errorMsg);
}
}
return (blockHeader, errorMsg);
}
private bool ShouldFinalize(Keccak finalizedBlockHash) => finalizedBlockHash != Keccak.Zero;
private bool TryGetBranch(Block block, out Block[] blocks)
{
List<Block> blocksList = new() {block};
Block? predecessor = block;
while (!_blockTree.IsMainChain(predecessor.Header))
{
predecessor = _blockTree.FindParent(predecessor, BlockTreeLookupOptions.None);
if (predecessor == null)
{
blocks = Array.Empty<Block>();
return false;
}
blocksList.Add(predecessor);
};
blocksList.Reverse();
blocks = blocksList.ToArray();
return true;
}
}
}
| 1 | 26,429 | finalizedHeader should be saved in FinalizationManager when we have FinalizationBlockHash != Keccak.Zero | NethermindEth-nethermind | .cs |
@@ -48,6 +48,7 @@ func AddDiskImportSteps(w *daisy.Workflow, dataDiskInfos []ovfutils.DiskInfo) {
setupDataDiskStepName := fmt.Sprintf("setup-data-disk-%v", dataDiskIndex)
diskImporterDiskName := fmt.Sprintf("disk-importer-%v", dataDiskIndex)
+ scratchDiskDiskName := fmt.Sprintf("disk-importer-scratch-%s-%v", w.ID(), dataDiskIndex)
setupDataDiskStep := daisy.NewStep(setupDataDiskStepName, w, time.Hour)
setupDataDiskStep.CreateDisks = &daisy.CreateDisks{ | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
package daisyovfutils
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_utils"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
"google.golang.org/api/compute/v1"
)
const (
createInstanceStepName = "create-instance"
importerDiskSize = "10"
dataDiskImportTimeout = "3600s"
)
// AddDiskImportSteps adds Daisy steps to OVF import workflow to import disks defined in
// dataDiskInfos.
func AddDiskImportSteps(w *daisy.Workflow, dataDiskInfos []ovfutils.DiskInfo) {
if dataDiskInfos == nil || len(dataDiskInfos) == 0 {
return
}
var diskNames []string
w.Sources["import_image_data.sh"] = "../image_import/import_image.sh"
for i, dataDiskInfo := range dataDiskInfos {
dataDiskIndex := i + 1
dataDiskFilePath := dataDiskInfo.FilePath
diskNames = append(
diskNames,
fmt.Sprintf("%v-data-disk-%v", w.Vars["instance_name"].Value, dataDiskIndex))
setupDataDiskStepName := fmt.Sprintf("setup-data-disk-%v", dataDiskIndex)
diskImporterDiskName := fmt.Sprintf("disk-importer-%v", dataDiskIndex)
setupDataDiskStep := daisy.NewStep(setupDataDiskStepName, w, time.Hour)
setupDataDiskStep.CreateDisks = &daisy.CreateDisks{
{
Disk: compute.Disk{
Name: diskImporterDiskName,
SourceImage: "projects/compute-image-tools/global/images/family/debian-9-worker",
Type: "pd-ssd",
},
SizeGb: importerDiskSize,
},
{
Disk: compute.Disk{
Name: diskNames[i],
Type: "pd-ssd",
},
SizeGb: "10",
Resource: daisy.Resource{
ExactName: true,
NoCleanup: true,
},
},
}
w.Steps[setupDataDiskStepName] = setupDataDiskStep
createDiskImporterInstanceStepName := fmt.Sprintf("create-data-disk-import-instance-%v", dataDiskIndex)
createDiskImporterInstanceStep := daisy.NewStep(createDiskImporterInstanceStepName, w, time.Hour)
sTrue := "true"
dataDiskImporterInstanceName := fmt.Sprintf("data-disk-importer-%v", dataDiskIndex)
createDiskImporterInstanceStep.CreateInstances = &daisy.CreateInstances{
{
Instance: compute.Instance{
Name: dataDiskImporterInstanceName,
Disks: []*compute.AttachedDisk{{Source: diskImporterDiskName}},
MachineType: "n1-standard-4",
Metadata: &compute.Metadata{
Items: []*compute.MetadataItems{
{Key: "block-project-ssh-keys", Value: &sTrue},
{Key: "disk_name", Value: &diskNames[i]},
{Key: "source_disk_file", Value: &dataDiskFilePath},
},
},
NetworkInterfaces: []*compute.NetworkInterface{
{
Network: w.Vars["network"].Value,
Subnetwork: w.Vars["subnet"].Value,
},
},
},
Scopes: []string{
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/compute",
},
StartupScript: "import_image_data.sh",
},
}
w.Steps[createDiskImporterInstanceStepName] = createDiskImporterInstanceStep
waitForDataDiskImportInstanceSignalStepName := fmt.Sprintf("wait-for-data-disk-%v-signal", dataDiskIndex)
waitForDataDiskImportInstanceSignalStep := daisy.NewStep(waitForDataDiskImportInstanceSignalStepName, w, time.Hour)
waitForDataDiskImportInstanceSignalStep.Timeout = dataDiskImportTimeout
waitForDataDiskImportInstanceSignalStep.WaitForInstancesSignal = &daisy.WaitForInstancesSignal{
{
Name: dataDiskImporterInstanceName,
SerialOutput: &daisy.SerialOutput{
Port: 1,
SuccessMatch: "ImportSuccess:",
FailureMatch: []string{"ImportFailed:", "WARNING Failed to download metadata script"},
StatusMatch: "Import:",
},
},
}
w.Steps[waitForDataDiskImportInstanceSignalStepName] = waitForDataDiskImportInstanceSignalStep
deleteDataDiskImportInstanceSignalStepName := fmt.Sprintf("delete-data-disk-%v-import-instance", dataDiskIndex)
deleteDataDiskImportInstanceSignalStep := daisy.NewStep(deleteDataDiskImportInstanceSignalStepName, w, time.Hour)
deleteDataDiskImportInstanceSignalStep.DeleteResources = &daisy.DeleteResources{
Instances: []string{dataDiskImporterInstanceName},
}
w.Steps[deleteDataDiskImportInstanceSignalStepName] = deleteDataDiskImportInstanceSignalStep
w.Dependencies[createDiskImporterInstanceStepName] = []string{setupDataDiskStepName}
w.Dependencies[waitForDataDiskImportInstanceSignalStepName] = []string{createDiskImporterInstanceStepName}
w.Dependencies[deleteDataDiskImportInstanceSignalStepName] = []string{waitForDataDiskImportInstanceSignalStepName}
w.Dependencies[createInstanceStepName] = append(
w.Dependencies[createInstanceStepName], deleteDataDiskImportInstanceSignalStepName)
}
// attach newly created disks to the instance
for _, diskName := range diskNames {
(*w.Steps[createInstanceStepName].CreateInstances)[0].Disks =
append(
(*w.Steps[createInstanceStepName].CreateInstances)[0].Disks,
&compute.AttachedDisk{Source: diskName, AutoDelete: true})
}
}
| 1 | 9,319 | Not using ExactName: true would remove the need for manually adding workflow ID as it would be added automatically by Daisy. E.g. 'disk-importer-2-import-ovf-7mn7h' was created from diskImporterDiskName above even though only 'disk-importer-2' was specified. ExactName: true should be used for resources that shouldn't include any temp prefix/suffix, such as disks that will be permanently attached to a created instance. Scratch disk is not one of them. | GoogleCloudPlatform-compute-image-tools | go |
@@ -8469,8 +8469,13 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
'For argument "inplace" expected type bool, received type {}.'
.format(type(inplace).__name__))
- sdf = self._sdf.filter(expr)
- internal = self._internal.copy(sdf=sdf)
+ data_columns = [label[0] for label in self._internal.column_labels]
+ sdf = self._sdf.select(self._internal.index_scols
+ + [scol.alias(col) for scol, col
+ in zip(self._internal.column_scols, data_columns)]) \
+ .filter(expr)
+ internal = self._internal.copy(sdf=sdf,
+ column_scols=[scol_for(sdf, col) for col in data_columns])
if inplace:
self._internal = internal | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict, Callable
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructType,
StructField)
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function, align_diff_frames
from databricks.koalas.generic import _Frame
from databricks.koalas.internal import (_InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME)
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import column_labels_level, name_like_string, scol_for, validate_axis
from databricks.koalas.typedef import _infer_return_type, as_spark_type, as_python_type
from databricks.koalas.plot import KoalasFramePlotMethods
from databricks.koalas.config import get_option
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas'
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(df)
angles degrees
circle 0 720
triangle 6 360
rectangle 8 720
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.floordiv(10)
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.rfloordiv(10)
angles degrees
circle NaN 0
triangle 3.0 0
rectangle 2.0 0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class DataFrame(_Frame, Generic[T]):
"""
Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, and a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(sdf=data, index_map=None))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
@property
def ndim(self):
"""
Return an int representing the number of array dimensions.
return 2 for DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', None],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
NaN 7 8
>>> df.ndim
2
"""
return 2
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
Parameters
----------
sfun : either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
axis: used only for sanity check because series only support index axis.
name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
from databricks.koalas import Series
axis = validate_axis(axis)
if axis == 0:
exprs = []
num_args = len(signature(sfun).parameters)
for label in self._internal.column_labels:
col_sdf = self._internal.scol_for(label)
col_type = self._internal.spark_type_for(label)
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(name_like_string(label)))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
if self._internal.column_labels_level > 1:
pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_labels)
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
# TODO: return Koalas series.
return row # Return first row as a Series
elif axis == 1:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)
df = self._sdf.select(calculate_columns_axis(*self._internal.column_scols).alias("0"))
return DataFrame(df)["0"]
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
def _apply_series_op(self, op):
applied = []
for label in self._internal.column_labels:
applied.append(op(self[label]))
internal = self._internal.with_new_columns(applied)
return DataFrame(internal)
# Arithmetic Operators
def _map_series_op(self, op, other):
from databricks.koalas.base import IndexOpsMixin
if not isinstance(other, DataFrame) and (isinstance(other, IndexOpsMixin) or
is_sequence(other)):
raise ValueError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
if isinstance(other, DataFrame) and self is not other:
if self._internal.column_labels_level != other._internal.column_labels_level:
raise ValueError('cannot join with no overlapping index names')
# Different DataFrames
def apply_op(kdf, this_column_labels, that_column_labels):
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (getattr(kdf[this_label], op)(kdf[that_label]), this_label)
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
else:
# DataFrame and Series
if isinstance(other, DataFrame):
return self._apply_series_op(lambda kser: getattr(kser, op)(other[kser.name]))
else:
return self._apply_series_op(lambda kser: getattr(kser, op)(other))
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
# create accessor for plot
plot = CachedAccessor("plot", KoalasFramePlotMethods)
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = KoalasFramePlotMethods.hist.__doc__
def kde(self, bw_method=None, ind=None, **kwds):
return self.plot.kde(bw_method, ind, **kwds)
kde.__doc__ = KoalasFramePlotMethods.kde.__doc__
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power of series',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False False
c False True
d False False
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False False
c True False
d True False
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True False
c True True
d True False
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False False
c False False
d False False
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True False
c False True
d False False
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True True
c True False
d True True
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
Koalas uses return type hint and does not try to infer the type.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
You can omit the type hint and let Koalas infer its type.
>>> df.applymap(lambda x: x ** 2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# TODO: We can implement shortcut theoretically since it creates new DataFrame
# anyway and we don't have to worry about operations on different DataFrames.
return self._apply_series_op(lambda kser: kser.apply(func))
# TODO: not all arguments are implemented comparing to Pandas' for now.
def aggregate(self, func: Union[List[str], Dict[str, List[str]]]):
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']]
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']]
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from databricks.koalas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([
(column, func) for column in self.columns])
else:
raise ValueError("If the given function is a list, it "
"should only contains function names as strings.")
if not isinstance(func, dict) or \
not all(isinstance(key, str) and
(isinstance(value, str) or
isinstance(value, list) and all(isinstance(v, str) for v in value))
for key, value in func.items()):
raise ValueError("aggs must be a dict mapping from column name (string) to aggregate "
"functions (list of strings).")
kdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame
# The codes below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
# Aggregated output is usually pretty much small. So it is fine to directly use pandas API.
pdf = kdf.to_pandas().stack()
pdf.index = pdf.index.droplevel()
pdf.columns.names = [None]
pdf.index.names = [None]
return DataFrame(pdf[list(func.keys())])
agg = aggregate
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return ks.from_pandas(corr(self, method))
def iteritems(self) -> Iterable:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : pandas.Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = ks.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
internal_index_columns = self._internal.index_columns
internal_data_columns = self._internal.data_columns
def extract_kv_from_spark_row(row):
k = row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(
row[c] for c in internal_index_columns)
v = [row[c] for c in internal_data_columns]
return k, v
for k, v in map(extract_kv_from_spark_row, self._sdf.toLocalIterator()):
s = pd.Series(v, index=columns, name=k)
yield k, s
def items(self) -> Iterable:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import option_context
>>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE
... ks.DataFrame({'a': range(1001)}).transpose()
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more then the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' "
"to retrieve to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive."
.format(max_compute_count))
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
pairs = F.explode(F.array(*[
F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] +
[self[label]._scol.alias("value")]
) for label in self._internal.column_labels]))
exploded_df = self._sdf.withColumn("pairs", pairs).select(
[F.to_json(F.struct(F.array([scol.cast('string')
for scol in self._internal.index_scols])
.alias('a'))).alias('index'),
F.col("pairs.*")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [SPARK_INDEX_NAME_FORMAT(i)
for i in range(self._internal.column_labels_level)]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index')
transposed_df = pivoted_df.agg(F.first(F.col("value")))
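# After the pivot, each distinct JSON-encoded index value becomes a column and each
# original column label becomes a row, roughly as below:
#
# +-----------------+-----------------+-----------------+-----------------+-----------------+
# |__index_level_0__|__index_level_1__|{"a":["y1","z1"]}|{"a":["y2","z2"]}|{"a":["y3","z3"]}|
# +-----------------+-----------------+-----------------+-----------------+-----------------+
# |                a|               x1|                1|                0|                3|
# |                a|               x2|                0|               50|                2|
# |                b|               x3|                0|                0|                1|
# +-----------------+-----------------+-----------------+-----------------+-----------------+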
new_data_columns = list(filter(lambda x: x not in internal_index_columns,
transposed_df.columns))
internal = self._internal.copy(
sdf=transposed_df,
index_map=[(col, None) for col in internal_index_columns],
column_labels=[tuple(json.loads(col)['a']) for col in new_data_columns],
column_scols=[scol_for(transposed_df, col) for col in new_data_columns],
column_label_names=None)
return DataFrame(internal)
T = property(transpose)
def apply(self, func, axis=0):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``).
.. note:: when `axis` is 0 or 'index', the `func` is unable to access
the whole input series. Koalas internally splits the input series into multiple
batches and calls `func` with each batch multiple times. Therefore, operations
such as global aggregations are impossible. See the example below.
>>> # This case does not return the length of whole series but of the batch internally
... # used.
... def length(s) -> int:
... return len(s)
...
>>> df = ks.DataFrame({'A': range(1000)})
>>> df.apply(length, axis=0) # doctest: +SKIP
0 83
1 83
2 83
...
10 83
11 83
Name: 0, dtype: int32
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(s) -> ks.Series[np.int32]:
... return s ** 2
Koalas uses return type hint and does not try to infer the type.
When axis is 1, it is required to specify `DataFrame` with type hints
as below:
>>> def plus_one(x) -> ks.DataFrame[float, float]:
... return x + 1
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``. See examples below.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = ks.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> def sqrt(x) -> ks.Series[float]:
... return np.sqrt(x)
...
>>> df.apply(sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
You can omit the type hint and let Koalas infer its type.
>>> df.apply(np.sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
When `axis` is 1 or 'columns', it applies the function for each row.
>>> def summation(x) -> np.int64:
... return np.sum(x)
...
>>> df.apply(summation, axis=1)
0 13
1 13
2 13
Name: 0, dtype: int64
Likewise, you can omit the type hint and let Koalas infer its type.
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
Name: 0, dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
Name: 0, dtype: object
In order to specify the types when `axis` is '1', it should use DataFrame[...]
annotation. In this case, the column names are automatically generated.
>>> def identify(x) -> ks.DataFrame[np.int64, np.int64]:
... return x
...
>>> df.apply(identify, axis=1)
c0 c1
0 4 9
1 4 9
2 4 9
"""
from databricks.koalas.groupby import GroupBy
from databricks.koalas.series import _col
if isinstance(func, np.ufunc):
f = func
func = lambda *args, **kwargs: f(*args, **kwargs)
assert callable(func), "the first argument should be a callable function."
axis = validate_axis(axis)
should_return_series = False
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
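# If the function has no return type annotation, the schema has to be inferred by
# actually running `func` on a small sample (see below); otherwise the annotation
# is used to build the return schema directly.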
def apply_func(pdf):
pdf_or_pser = pdf.apply(func, axis=axis)
if isinstance(pdf_or_pser, pd.Series):
return pdf_or_pser.to_frame()
else:
return pdf_or_pser
if should_infer_schema:
# Here we execute with the first `compute.shortcut_limit` records to get the return type.
# If there are fewer records than the limit, it uses the pandas API directly as a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
applied = pdf.apply(func, axis=axis)
kser_or_kdf = ks.from_pandas(applied)
if len(pdf) <= limit:
return kser_or_kdf
kdf = kser_or_kdf
if isinstance(kser_or_kdf, ks.Series):
should_return_series = True
kdf = kser_or_kdf.to_frame()
return_schema = kdf._internal._sdf.drop(*HIDDEN_COLUMNS).schema
sdf = GroupBy._spark_group_map_apply(
self, apply_func, (F.spark_partition_id(),),
return_schema, retain_index=True)
# If schema is inferred, we can restore indexes too.
internal = kdf._internal.copy(sdf=sdf,
column_scols=[scol_for(sdf, col)
for col in kdf._internal.data_columns])
else:
return_schema = _infer_return_type(func).tpe
require_index_axis = getattr(return_sig, "__origin__", None) == ks.Series
require_column_axis = getattr(return_sig, "__origin__", None) == ks.DataFrame
if require_index_axis:
if axis != 0:
raise TypeError(
"The given function should specify a scalar or a series as its type "
"hints when axis is 0 or 'index'; however, the return type "
"was %s" % return_sig)
fields_types = zip(self.columns, [return_schema] * len(self.columns))
return_schema = StructType([StructField(c, t) for c, t in fields_types])
elif require_column_axis:
if axis != 1:
raise TypeError(
"The given function should specify a scalar or a frame as its type "
"hints when axis is 1 or 'column'; however, the return type "
"was %s" % return_sig)
else:
# any axis is fine.
should_return_series = True
return_schema = StructType([StructField("0", return_schema)])
sdf = GroupBy._spark_group_map_apply(
self, apply_func, (F.spark_partition_id(),),
return_schema, retain_index=False)
# Otherwise, it loses index.
internal = _InternalFrame(sdf=sdf, index_map=None)
result = DataFrame(internal)
if should_return_series:
return _col(result)
else:
return result
def transform(self, func):
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
Koalas uses return type hint and does not try to infer the type.
.. note:: the series within ``func`` is actually multiple pandas series as the
segments of the whole Koalas series; therefore, the length of each series
is not guaranteed. As an example, an aggregation against each series
does not work as a global aggregation but as an aggregation of each segment. See
below:
>>> def func(x) -> ks.Series[np.int32]:
... return x + sum(x)
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
You can omit the type hint and let Koalas infer its type.
>>> df.transform(lambda x: x ** 2)
A B
0 0 1
1 1 4
2 4 9
For multi-index columns:
>>> df.columns = [('X', 'A'), ('X', 'B')]
>>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
>>> df.transform(lambda x: x ** 2) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first `compute.shortcut_limit` records to get the return type.
# If there are fewer records than the limit, it uses the pandas API directly as a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
transformed = pdf.transform(func)
kdf = DataFrame(transformed)
if len(pdf) <= limit:
return kdf
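# The sampled result above was only used to infer the per-column return types; now wrap
# `func` with those types and apply it column by column over the full data.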
applied = []
for input_label, output_label in zip(
self._internal.column_labels, kdf._internal.column_labels):
wrapped = ks.pandas_wraps(
func,
return_col=as_python_type(kdf[output_label].spark_type))
applied.append(wrapped(self[input_label]).rename(input_label))
internal = self._internal.with_new_columns(applied)
return DataFrame(internal)
else:
wrapped = ks.pandas_wraps(func)
return self._apply_series_op(lambda kser: wrapped(kser).rename(kser.name))
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._internal = self.drop(item)._internal
return result
# TODO: add axis parameter can work when '1' or 'columns'
def xs(self, key, axis=0, level=None):
"""
Return cross-section from the DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : 0 or 'index', default 0
Axis to retrieve cross-section on.
currently only support 0 or 'index'
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
DataFrame
Cross-section from the original DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = ks.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class locomotion
mammal walks 4 0
"""
from databricks.koalas.series import _col
if not isinstance(key, (str, tuple)):
raise ValueError("'key' should be string or tuple that contains strings")
if not all(isinstance(index, str) for index in key):
raise ValueError("'key' should have index names as only strings "
"or a tuple that contain index names as only strings")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if isinstance(key, str):
key = (key,)
if len(key) > len(self._internal.index_scols):
raise KeyError("Key length ({}) exceeds index depth ({})"
.format(len(key), len(self._internal.index_scols)))
if level is None:
level = 0
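# Select everything except the index levels consumed by `key`, and keep only the rows
# whose index values at those levels equal the corresponding elements of `key`.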
scols = self._internal.scols[:level] + self._internal.scols[level+len(key):]
rows = [self._internal.scols[lvl] == index
for lvl, index in enumerate(key, level)]
sdf = self._sdf.select(scols + list(HIDDEN_COLUMNS)) \
.drop(NATURAL_ORDER_COLUMN_NAME) \
.filter(reduce(lambda x, y: x & y, rows))
if len(key) == len(self._internal.index_scols):
result = _col(DataFrame(_InternalFrame(sdf=sdf, index_map=None)).T)
result.name = key
else:
internal = self._internal.copy(
sdf=sdf,
index_map=self._internal.index_map[:level] +
self._internal.index_map[level+len(key):])
result = DataFrame(internal)
return result
def where(self, cond, other=np.nan):
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean DataFrame
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is False are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.where(df1 > 0).sort_index()
A B
0 NaN 100.0
1 1.0 200.0
2 2.0 300.0
3 3.0 400.0
4 4.0 500.0
>>> df1.where(df1 > 1, 10).sort_index()
A B
0 10 100
1 10 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df1 + 100).sort_index()
A B
0 100 100
1 101 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df2).sort_index()
A B
0 0 100
1 -1 200
2 2 300
3 3 400
4 4 500
When the column name of cond is different from self, it treats all values as False
>>> cond = ks.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0
>>> cond
C D
0 True False
1 False True
2 False False
3 True False
4 False True
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
When the type of cond is Series, it just checks the boolean values regardless of column name
>>> cond = ks.Series([1, 2]) > 1
>>> cond
0 False
1 True
Name: 0, dtype: bool
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 1.0 200.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> reset_option("compute.ops_on_diff_frames")
"""
from databricks.koalas.series import Series
tmp_cond_col_name = '__tmp_cond_col_{}__'.format
tmp_other_col_name = '__tmp_other_col_{}__'.format
kdf = self.copy()
tmp_cond_col_names = [tmp_cond_col_name(name_like_string(label))
for label in self._internal.column_labels]
if isinstance(cond, DataFrame):
cond = cond[[(cond._internal.scol_for(label)
if label in cond._internal.column_labels else F.lit(False)).alias(name)
for label, name
in zip(self._internal.column_labels, tmp_cond_col_names)]]
kdf[tmp_cond_col_names] = cond
elif isinstance(cond, Series):
cond = cond.to_frame()
cond = cond[[cond._internal.column_scols[0].alias(name) for name in tmp_cond_col_names]]
kdf[tmp_cond_col_names] = cond
else:
raise ValueError("type of cond must be a DataFrame or Series")
tmp_other_col_names = [tmp_other_col_name(name_like_string(label))
for label in self._internal.column_labels]
if isinstance(other, DataFrame):
other = other[[(other._internal.scol_for(label)
if label in other._internal.column_labels else F.lit(np.nan))
.alias(name)
for label, name
in zip(self._internal.column_labels, tmp_other_col_names)]]
kdf[tmp_other_col_names] = other
elif isinstance(other, Series):
other = other.to_frame()
other = other[[other._internal.column_scols[0].alias(name)
for name in tmp_other_col_names]]
kdf[tmp_other_col_names] = other
else:
for label in self._internal.column_labels:
kdf[tmp_other_col_name(name_like_string(label))] = other
# The logic above makes the Spark DataFrame look like below:
# +-----------------+---+---+------------------+-------------------+------------------+--...
# |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...
# +-----------------+---+---+------------------+-------------------+------------------+--...
# | 0| 0|100| true| 0| false| ...
# | 1| 1|200| false| -1| false| ...
# | 3| 3|400| true| -3| false| ...
# | 2| 2|300| false| -2| true| ...
# | 4| 4|500| false| -4| false| ...
# +-----------------+---+---+------------------+-------------------+------------------+--...
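# For each original column, keep its value where the temporary condition column is
# true, and otherwise take the value from the temporary 'other' column.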
column_scols = []
for label in self._internal.column_labels:
column_scols.append(
F.when(
kdf[tmp_cond_col_name(name_like_string(label))]._scol,
kdf[label]._scol
).otherwise(
kdf[tmp_other_col_name(name_like_string(label))]._scol
).alias(kdf._internal.column_name_for(label)))
return DataFrame(kdf._internal.with_new_columns(column_scols,
column_labels=self._internal.column_labels))
def mask(self, cond, other=np.nan):
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean DataFrame
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.mask(df1 > 0).sort_index()
A B
0 0.0 NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> df1.mask(df1 > 1, 10).sort_index()
A B
0 0 10
1 1 10
2 10 10
3 10 10
4 10 10
>>> df1.mask(df1 > 1, df1 + 100).sort_index()
A B
0 0 200
1 1 300
2 102 400
3 103 500
4 104 600
>>> df1.mask(df1 > 1, df2).sort_index()
A B
0 0 -100
1 1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> reset_option("compute.ops_on_diff_frames")
"""
from databricks.koalas.series import Series
if not isinstance(cond, (DataFrame, Series)):
raise ValueError("type of cond must be a DataFrame or Series")
cond_inversed = cond._apply_series_op(lambda kser: ~kser)
return self.where(cond_inversed, other)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.column_labels) == 0 or self._sdf.rdd.isEmpty()
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
.. note:: currently it collects the top 1000 rows and returns its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ks.range(1001).style # doctest: +ELLIPSIS
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option('compute.max_rows')
pdf = self.head(max_results + 1).to_pandas()
if len(pdf) > max_results:
warnings.warn(
"'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, (str, tuple)):
keys = [keys]
else:
keys = list(keys)
columns = set(self.columns)
for key in keys:
if key not in columns:
raise KeyError(key)
keys = [key if isinstance(key, tuple) else (key,) for key in keys]
if drop:
column_labels = [label for label in self._internal.column_labels if label not in keys]
else:
column_labels = self._internal.column_labels
if append:
index_map = self._internal.index_map + [(self._internal.column_name_for(label), label)
for label in keys]
else:
index_map = [(self._internal.column_name_for(label), label) for label in keys]
internal = self._internal.copy(index_map=index_map,
column_labels=column_labels,
column_scols=[self._internal.scol_for(label)
for label in column_labels])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
multi_index = len(self._internal.index_map) > 1
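# Default names for index levels that have no name: 'level_<n>' for a MultiIndex,
# and 'index' (or 'level_<n>' if 'index' is already a column label) for a single index.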
def rename(index):
if multi_index:
return ('level_{}'.format(index),)
else:
if ('index',) not in self._internal.column_labels:
return ('index',)
else:
return ('level_{}'.format(index),)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
new_data_scols = [self._internal.scol_for(column).alias(name_like_string(name))
for column, name in new_index_map]
if len(index_map) > 0:
index_scols = [scol_for(self._sdf, column) for column, _ in index_map]
sdf = self._sdf.select(
index_scols + new_data_scols + self._internal.column_scols +
list(HIDDEN_COLUMNS))
else:
sdf = self._sdf.select(
new_data_scols + self._internal.column_scols + list(HIDDEN_COLUMNS))
# Now, the new internal Spark columns are named the same as the index names.
new_index_map = [(column, name) for column, name in new_index_map]
sdf = _InternalFrame.attach_default_index(sdf)
index_map = [(SPARK_DEFAULT_INDEX_NAME, None)]
if drop:
new_index_map = []
if self._internal.column_labels_level > 1:
column_depth = len(self._internal.column_labels[0])
if col_level >= column_depth:
raise IndexError('Too many levels: Index has only {} levels, not {}'
.format(column_depth, col_level + 1))
if any(col_level + len(name) > column_depth for _, name in new_index_map):
raise ValueError('Item must have length equal to number of levels.')
column_labels = ([tuple(([col_fill] * col_level)
+ list(name)
+ ([col_fill] * (column_depth - (len(name) + col_level))))
for _, name in new_index_map]
+ self._internal.column_labels)
else:
column_labels = [name for _, name in new_index_map] + self._internal.column_labels
internal = self._internal.copy(
sdf=sdf,
index_map=index_map,
column_labels=column_labels,
column_scols=([scol_for(sdf, name_like_string(name)) for _, name in new_index_map]
+ [scol_for(sdf, col) for col in self._internal.data_columns]))
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
return self._apply_series_op(lambda kser: kser.isnull())
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
return self._apply_series_op(lambda kser: kser.notnull())
notna = notnull
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition in a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
return self._apply_series_op(lambda kser: kser.shift(periods, fill_value))
# TODO: axis should support 1 or 'columns' either at this moment
def diff(self, periods: int = 1, axis: Union[int, str] = 0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition in a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._apply_series_op(lambda kser: kser.diff(periods))
# TODO: axis should support 1 or 'columns' either at this moment
def nunique(self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
dropna : bool, default True
Don't include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
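# Compute the distinct count of every column in a single Spark job, then collect the
# one-row result to pandas and transpose it into a Series.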
res = self._sdf.select([self[label]._nunique(dropna, approx, rsd)
for label in self._internal.column_labels]).toPandas()
if self._internal.column_labels_level == 1:
res.columns = [label[0] for label in self._internal.column_labels]
else:
res.columns = pd.MultiIndex.from_tuples(self._internal.column_labels)
if self._internal.column_label_names is not None:
res.columns.names = self._internal.column_label_names
return res.T.iloc[:, 0]
def round(self, decimals=0):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
.. note:: If `decimals` is a Series, it is expected to be small,
as all the data is loaded into the driver's memory.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals = {k if isinstance(k, tuple) else (k,): v
for k, v in decimals._to_internal_pandas().items()}
elif isinstance(decimals, dict):
decimals = {k if isinstance(k, tuple) else (k,): v
for k, v in decimals.items()}
elif isinstance(decimals, int):
decimals = {k: decimals for k in self._internal.column_labels}
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
def op(kser):
label = kser._internal.column_labels[0]
if label in decimals:
return F.round(kser._scol, decimals[label]).alias(kser._internal.data_columns[0])
else:
return kser
return self._apply_series_op(op)
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
Name: 0, dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
Name: 0, dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
Name: 0, dtype: bool
"""
from databricks.koalas.series import _col
if len(self._internal.index_names) > 1:
raise ValueError("Now we don't support multi-index Now.")
if subset is None:
subset = self._internal.column_labels
else:
if isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
diff = set(subset).difference(set(self._internal.column_labels))
if len(diff) > 0:
raise KeyError(', '.join([str(d) if len(d) > 1 else d[0] for d in diff]))
group_cols = [self._internal.column_name_for(label) for label in subset]
index_column = self._internal.index_columns[0]
if self._internal.index_names[0] is not None:
name = self._internal.index_names[0]
else:
name = ('0',)
column = name_like_string(name)
sdf = self._sdf
if column == index_column:
index_column = SPARK_DEFAULT_INDEX_NAME
sdf = sdf.select([self._internal.index_scols[0].alias(index_column)]
+ self._internal.data_scols)
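# Mark duplicates with a window partitioned by the subset columns: for 'first'/'last',
# a row is a duplicate if it is not the first row in the chosen order; for keep=False,
# every row in a group with more than one row is a duplicate.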
if keep == 'first' or keep == 'last':
if keep == 'first':
ord_func = spark.functions.asc
else:
ord_func = spark.functions.desc
window = Window.partitionBy(group_cols) \
.orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) \
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
elif not keep:
window = Window.partitionBy(group_cols) \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
sdf = sdf.withColumn(column, F.count('*').over(window) > 1)
else:
raise ValueError("'keep' only support 'first', 'last' and False")
sdf = sdf.select(scol_for(sdf, index_column), scol_for(sdf, column))
return _col(DataFrame(_InternalFrame(sdf=sdf,
index_map=[(index_column,
self._internal.index_names[0])],
column_labels=[name],
column_scols=[scol_for(sdf, column)])))
def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
col1 col2
0 1 3
1 2 4
We can specify the index columns.
>>> kdf = spark_df.to_koalas(index_col='col1')
>>> kdf # doctest: +NORMALIZE_WHITESPACE
col2
col1
1 3
2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
assert isinstance(self, spark.DataFrame), type(self)
from databricks.koalas.namespace import _get_index_map
index_map = _get_index_map(self, index_col)
internal = _InternalFrame(sdf=self, index_map=index_map)
return DataFrame(internal)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, which gets uncached after the execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'overwrite',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the table exists
already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self.to_spark().write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, **options)
def to_delta(self, path: str, mode: str = 'overwrite',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the destination
exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2012-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, **options)
def to_parquet(self, path: str, mode: str = 'overwrite',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self.to_spark().write.parquet(
path=path, mode=mode, partitionBy=partition_cols, compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'overwrite', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self.to_spark().write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, **options)
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None):
"""
Return the current DataFrame as a Spark DataFrame.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
See Also
--------
DataFrame.to_koalas
Examples
--------
By default, this method loses the index as below.
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
If `index_col` is set, it keeps the index column as specified.
>>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE
+-----+---+---+---+
|index| a| b| c|
+-----+---+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-----+---+---+---+
Keeping the index column is useful when you want to call some Spark APIs and
convert it back to a Koalas DataFrame without creating a default index, which
can affect performance.
>>> spark_df = df.to_spark(index_col="index")
>>> spark_df = spark_df.filter("a == 2")
>>> spark_df.to_koalas(index_col="index") # doctest: +NORMALIZE_WHITESPACE
a b c
index
1 2 5 8
In case of multi-index, specify a list to `index_col`.
>>> new_df = df.set_index("a", append=True)
>>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
>>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE
+-------+-------+---+---+
|index_1|index_2| b| c|
+-------+-------+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-------+-------+---+---+
Likewise, it can be converted back to a Koalas DataFrame.
>>> new_spark_df.to_koalas(
... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE
b c
index_1 index_2
0 1 4 7
1 2 5 8
2 3 6 9
"""
if index_col is None:
return self._internal.spark_df
else:
if isinstance(index_col, str):
index_col = [index_col]
data_column_names = []
data_columns = []
data_columns_column_labels = \
zip(self._internal.data_columns, self._internal.column_labels)
# TODO: this code is similar with _InternalFrame.spark_df. Might have to deduplicate.
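# Alias each data column with its flattened label (or its positional index when the
# label is None) so the output Spark column names match the Koalas column labels.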
for i, (column, label) in enumerate(data_columns_column_labels):
scol = self._internal.scol_for(label)
name = str(i) if label is None else name_like_string(label)
data_column_names.append(name)
if column != name:
scol = scol.alias(name)
data_columns.append(scol)
old_index_scols = self._internal.index_scols
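# Validate the given index names: there must be exactly one per index level and
# none of them may clash with the data column names.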
if len(index_col) != len(old_index_scols):
raise ValueError(
"length of index columns is %s; however, the length of the given "
"'index_col' is %s." % (len(old_index_scols), len(index_col)))
if any(col in data_column_names for col in index_col):
raise ValueError(
"'index_col' cannot be overlapped with other columns.")
sdf = self._internal.spark_internal_df
new_index_scols = [
index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)]
return sdf.select(new_index_scols + data_columns)
def to_pandas(self):
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
return self._assign(kwargs)
def _assign(self, kwargs):
assert isinstance(kwargs, dict)
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = {(k if isinstance(k, tuple) else (k,)):
(v._scol if isinstance(v, Series)
else v if isinstance(v, spark.Column)
else F.lit(v))
for k, v in kwargs.items()}
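# Replace existing columns first: if a column label (or, for MultiIndex, one of its
# prefixes) matches an assigned key, substitute the new expression under the original
# column name; otherwise keep the column unchanged. Keys that match no existing label
# are appended as new columns afterwards.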
scols = []
for label in self._internal.column_labels:
for i in range(len(label)):
if label[:len(label)-i] in pairs:
name = self._internal.column_name_for(label)
scol = pairs[label[:len(label)-i]].alias(name)
break
else:
scol = self._internal.scol_for(label)
scols.append(scol)
column_labels = self._internal.column_labels.copy()
for label, scol in pairs.items():
if label not in set(i[:len(label)] for i in self._internal.column_labels):
scols.append(scol.alias(name_like_string(label)))
column_labels.append(label)
level = self._internal.column_labels_level
column_labels = [tuple(list(label) + ([''] * (level - len(label))))
for label in column_labels]
internal = self._internal.with_new_columns(scols, column_labels=column_labels)
return DataFrame(internal)
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df_copy = df.copy()
>>> df_copy
x y z w
0 1 3 5 7
1 2 4 6 8
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
axis = validate_axis(axis)
if axis == 0:
if subset is not None:
if isinstance(subset, str):
labels = [(subset,)]
elif isinstance(subset, tuple):
labels = [subset]
else:
labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
invalids = [label for label in labels
if label not in self._internal.column_labels]
if len(invalids) > 0:
raise KeyError(invalids)
else:
labels = self._internal.column_labels
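# Count the non-null values per row over the selected columns; a row is kept when the
# count meets 'thresh', equals the number of columns (how='any'), or is non-zero
# (how='all').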
cnt = reduce(lambda x, y: x + y,
[F.when(self[label].notna()._scol, 1).otherwise(0)
for label in labels],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(labels))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
internal = self._internal.with_filter(pred)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values.
.. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
without specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate the last valid observation forward to the next valid one.
backfill / bfill: use the NEXT valid observation to fill the gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if value is not None:
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if limit is not None:
raise ValueError('limit parameter for value is not supported now')
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
value = {k if isinstance(k, tuple) else (k,): v for k, v in value.items()}
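# Fill each column with the value whose key matches its column label (or a prefix of
# it for MultiIndex); columns without a matching key are left untouched.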
def op(kser):
label = kser._internal.column_labels[0]
for k, v in value.items():
if k == label[:len(k)]:
return kser.fillna(value=value[k], method=method, axis=axis,
inplace=False, limit=limit)
else:
return kser
else:
op = lambda kser: kser.fillna(value=value, method=method, axis=axis,
inplace=False, limit=limit)
elif method is not None:
op = lambda kser: kser.fillna(value=value, method=method, axis=axis,
inplace=False, limit=limit)
else:
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
kdf = self._apply_series_op(op)
if inplace:
self._internal = kdf._internal
else:
return kdf
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> df.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> df.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, value should be of the same length with to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when you are passing a dictionary to the `to_replace`
parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
throw an error. Instead, specify the column name like `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
# TODO: Do we still need to support this argument?
if subset is None:
subset = self._internal.column_labels
elif isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
subset = [self._internal.column_name_for(label) for label in subset]
sdf = self._sdf
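# Three cases: a flat {value: replacement} dict is handed to Spark's replace over the
# subset; a dict keyed by column name is applied column by column (nested dicts via
# replace, scalars via a conditional withColumn); otherwise scalars/lists are replaced
# directly.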
if isinstance(to_replace, dict) and value is None and \
(not any(isinstance(i, dict) for i in to_replace.values())):
sdf = sdf.replace(to_replace, value, subset)
elif isinstance(to_replace, dict):
for name, replacement in to_replace.items():
if isinstance(name, str):
name = (name,)
df_column = self._internal.column_name_for(name)
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column,
F.when(scol_for(sdf, df_column) == replacement, value)
.otherwise(scol_for(sdf, df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
internal = self._internal.copy(sdf=sdf,
column_scols=[scol_for(sdf, col)
for col in self._internal.data_columns])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
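# Only numeric columns are clipped; non-numeric columns pass through unchanged
# (see the Notes section above).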
def op(kser):
if isinstance(kser.spark_type, numeric_types):
scol = kser._scol
if lower is not None:
scol = F.when(scol < lower, lower).otherwise(scol)
if upper is not None:
scol = F.when(scol > upper, upper).otherwise(scol)
return scol.alias(kser._internal.data_columns[0])
else:
return kser
return self._apply_series_op(op)
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
if get_option('compute.ordered_head'):
sdf = self._sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)
else:
sdf = self._sdf
return DataFrame(self._internal.copy(sdf=sdf.limit(n)))
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
They should be either a string or a list of fewer than three columns.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain string.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If a dict is passed, the resulting pivot table will have
columns concatenated by "_" where the first part is the value
of columns and the second part is the column name in values.
If a dict is passed, the key is the column to aggregate and the value
is a function or a list of functions.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4.0 5
two 7.0 6
foo one 4.0 1
two NaN 6
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values=['D'], index =['C'],
... columns="A", aggfunc={'D': 'mean'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D
A bar foo
C
large 5.5 2.000000
small 5.5 2.333333
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
large 5.5 2.000000 15 9
small 5.5 2.333333 17 13
"""
if not isinstance(columns, (str, tuple)):
raise ValueError("columns should be string or tuple.")
if not isinstance(values, (str, tuple)) and not isinstance(values, list):
raise ValueError('values should be string or list of one column.')
if not isinstance(aggfunc, str) and \
(not isinstance(aggfunc, dict) or
not all(isinstance(key, (str, tuple)) and isinstance(value, str)
for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string or tuple) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfunc"
" as dict and without index.")
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if columns not in self.columns:
raise ValueError("Wrong columns {}.".format(columns))
if isinstance(columns, str):
columns = (columns,)
if isinstance(values, list):
values = [col if isinstance(col, tuple) else (col,) for col in values]
if not all(isinstance(self._internal.spark_type_for(col), NumericType)
for col in values):
raise TypeError('values should be a numeric type.')
else:
values = values if isinstance(values, tuple) else (values,)
if not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError('values should be a numeric type.')
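# Build one Spark aggregation expression per value column: a single function name
# applies to every value, whereas a dict maps each value column to its own function
# (the dict keys must cover exactly the given values).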
if isinstance(aggfunc, str):
if isinstance(values, list):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(value), aggfunc))
for value in values]
else:
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(values), aggfunc))]
elif isinstance(aggfunc, dict):
aggfunc = {key if isinstance(key, tuple) else (key,): value
for key, value in aggfunc.items()}
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(key), value))
for key, value in aggfunc.items()]
agg_columns = [key for key, _ in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy() \
.pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
elif isinstance(index, list):
index = [label if isinstance(label, tuple) else (label,) for label in index]
sdf = self._sdf.groupBy([self._internal.scol_for(label) for label in index]) \
.pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
if isinstance(values, list):
index_columns = [self._internal.column_name_for(label) for label in index]
data_columns = [column for column in sdf.columns if column not in index_columns]
if len(values) > 1:
# If we have two values, Spark will return column's name
# in this format: column_values, where column contains
# their values in the DataFrame and values is
# the column list passed to the pivot_table().
# E.g. if column is b and values is ['b','e'],
# then ['2_b', '2_e', '3_b', '3_e'].
# We sort the columns of Spark DataFrame by values.
data_columns.sort(key=lambda x: x.split('_', 1)[1])
sdf = sdf.select(index_columns + data_columns)
column_name_to_index = dict(zip(self._internal.data_columns,
self._internal.column_labels))
column_labels = [tuple(list(column_name_to_index[name.split('_')[1]])
+ [name.split('_')[0]])
for name in data_columns]
index_map = list(zip(index_columns, index))
column_label_names = (([None] * column_labels_level(values))
+ [str(columns) if len(columns) > 1 else columns[0]])
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
column_labels=column_labels,
column_scols=[scol_for(sdf, col)
for col in data_columns],
column_label_names=column_label_names)
kdf = DataFrame(internal)
else:
column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]
index_map = list(zip(index_columns, index))
column_label_names = (([None] * len(values[0]))
+ [str(columns) if len(columns) > 1 else columns[0]])
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
column_labels=column_labels,
column_scols=[scol_for(sdf, col)
for col in data_columns],
column_label_names=column_label_names)
kdf = DataFrame(internal)
return kdf
else:
index_columns = [self._internal.column_name_for(label) for label in index]
index_map = list(zip(index_columns, index))
column_label_names = [str(columns) if len(columns) > 1 else columns[0]]
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
column_label_names=column_label_names)
return DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
index_map = []
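# Without an explicit index, attach the value label as literal index column(s) so the
# single aggregated row still gets an index.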
for i, index_value in enumerate(index_values):
colname = SPARK_INDEX_NAME_FORMAT(i)
sdf = sdf.withColumn(colname, F.lit(index_value))
index_map.append((colname, None))
column_label_names = [str(columns) if len(columns) > 1 else columns[0]]
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
column_label_names=column_label_names)
return DataFrame(internal)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas, which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because pivot
is an expensive operation and it is preferred to execute permissively rather than fail fast
when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
df = self
index = [index]
else:
df = self.copy()
df['__DUMMY__'] = F.monotonically_increasing_id()
df.set_index('__DUMMY__', append=True, inplace=True)
df.reset_index(level=range(len(df._internal.index_map) - 1), inplace=True)
index = df._internal.column_labels[:len(df._internal.index_map)]
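# Reuse pivot_table with 'first' as the aggregation; when no index was given, the
# index built above is anonymized below so the result carries unnamed index columns.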
df = df.pivot_table(
index=index, columns=columns, values=values, aggfunc='first')
if should_use_existing_index:
return df
else:
index_columns = df._internal.index_columns
# Note that the existing indexing column won't exist in the pivoted DataFrame.
internal = df._internal.copy(
index_map=[(index_column, None) for index_column in index_columns])
return DataFrame(internal)
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_labels_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_labels)
else:
columns = pd.Index([label[0] for label in self._internal.column_labels])
if self._internal.column_label_names is not None:
columns.names = self._internal.column_label_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
old_names = self._internal.column_labels
if len(old_names) != len(column_labels):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_labels)))
column_label_names = columns.names
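# Alias every existing Spark column to its new flattened label and rebuild the
# internal frame with the new column labels and label names.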
data_columns = [name_like_string(label) for label in column_labels]
column_scols = [self._internal.scol_for(label).alias(name)
for label, name in zip(self._internal.column_labels, data_columns)]
self._internal = self._internal.with_new_columns(column_scols,
column_labels=column_labels)
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(label).alias(name)
for label, name in zip(self._internal.column_labels, data_columns)] +
list(HIDDEN_COLUMNS))
column_scols = [scol_for(sdf, col) for col in data_columns]
self._internal = self._internal.copy(sdf=sdf,
column_labels=column_labels,
column_scols=column_scols,
column_label_names=column_label_names)
else:
old_names = self._internal.column_labels
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
column_labels = [col if isinstance(col, tuple) else (col,) for col in columns]
if isinstance(columns, pd.Index):
column_label_names = columns.names
else:
column_label_names = None
data_columns = [name_like_string(label) for label in column_labels]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(label).alias(name)
for label, name in zip(self._internal.column_labels, data_columns)] +
list(HIDDEN_COLUMNS))
column_scols = [scol_for(sdf, col) for col in data_columns]
self._internal = self._internal.copy(sdf=sdf,
column_labels=column_labels,
column_scols=column_scols,
column_label_names=column_label_names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[label].dtype for label in self._internal.column_labels],
index=pd.Index([label if len(label) > 1 else label[0]
for label in self._internal.column_labels]))
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
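# A column is kept when its NumPy dtype or Spark type matches 'include' (if given);
# otherwise it is kept unless it matches 'exclude'.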
column_labels = []
for label in self._internal.column_labels:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[label].dtype.name) in include_numpy_type or
self._internal.spark_type_for(label) in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[label].dtype.name) in exclude_numpy_type or
self._internal.spark_type_for(label) in exclude_spark_type)
if should_include:
column_labels.append(label)
column_scols = [self._internal.scol_for(label) for label in column_labels]
return DataFrame(self._internal.with_new_columns(column_scols, column_labels=column_labels))
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
def drop(self, labels=None, axis=1,
columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Also support for MultiIndex
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE
b
z w
0 5 7
1 6 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [(columns,)] # type: ignore
elif isinstance(columns, tuple):
columns = [columns]
else:
columns = [col if isinstance(col, tuple) else (col,) # type: ignore
for col in columns]
drop_column_labels = set(label for label in self._internal.column_labels
for col in columns
if label[:len(col)] == col)
if len(drop_column_labels) == 0:
raise KeyError(columns)
cols, labels = zip(*((column, label)
for column, label
in zip(self._internal.data_columns, self._internal.column_labels)
if label not in drop_column_labels))
column_scols = [self._internal.scol_for(label) for label in labels]
internal = self._internal.with_new_columns(column_scols, column_labels=list(labels))
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def _sort(self, by: List[Column], ascending: Union[bool, List[bool]],
inplace: bool, na_position: str):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
sdf = self._sdf.sort(*(by + [NATURAL_ORDER_COLUMN_NAME])).drop(NATURAL_ORDER_COLUMN_NAME)
kdf = DataFrame(self._internal.copy(sdf=sdf)) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
def sort_values(self, by: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]],
ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, (str, tuple)):
by = [by] # type: ignore
else:
by = [b if isinstance(b, tuple) else (b,) for b in by] # type: ignore
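# Each sort key must resolve to exactly one column; a label that selects several
# columns (e.g. a MultiIndex prefix) is rejected.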
new_by = []
for colname in by:
ser = self[colname]
if not isinstance(ser, ks.Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(colname))
new_by.append(ser._scol)
return self._sort(by=new_by, ascending=ascending,
inplace=inplace, na_position=na_position)
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("No other axis than 0 are supported at the moment")
if kind is not None:
raise NotImplementedError(
"Specifying the sorting algorithm is not supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_scols
elif is_list_like(level):
by = [self._internal.index_scols[l] for l in level] # type: ignore
else:
by = [self._internal.index_scols[level]]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns as in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
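# Build one boolean Spark column per DataFrame column. With a dict, only the listed columns
# get an `isin` membership test and the rest become literal False; with a list-like, every
# column is tested against the same values.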
column_scols = []
if isinstance(values, dict):
for i, col in enumerate(self.columns):
if col in values:
column_scols.append(self._internal.scol_for(self._internal.column_labels[i])
.isin(values[col]).alias(self._internal.data_columns[i]))
else:
column_scols.append(F.lit(False).alias(self._internal.data_columns[i]))
elif is_list_like(values):
column_scols += [
self._internal.scol_for(label).isin(list(values))
.alias(self._internal.column_name_for(label))
for label in self._internal.column_labels]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.with_new_columns(column_scols))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
right_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` has indices (a, x) and `right` has indices (b, x), the result will
be an index (x, a, b).
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda os: (os if os is None
else [os] if isinstance(os, tuple)
else [(os,)] if isinstance(os, str)
else [o if isinstance(o, tuple) else (o,) # type: ignore
for o in os])
if isinstance(right, ks.Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = _to_list(common)
right_keys = _to_list(common)
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_scol_for = lambda label: scol_for(left_table, self._internal.column_name_for(label))
right_scol_for = lambda label: scol_for(right_table, right._internal.column_name_for(label))
left_key_columns = [left_scol_for(label) for label in left_keys] # type: ignore
right_key_columns = [right_scol_for(label) for label in right_keys] # type: ignore
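# The join condition is the AND of pairwise equality between the left and right key columns.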
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.column_labels)
& set(right._internal.column_labels))
exprs = []
data_columns = []
column_labels = []
for label in self._internal.column_labels:
col = self._internal.column_name_for(label)
scol = left_scol_for(label)
if label in duplicate_columns:
if label in left_keys and label in right_keys: # type: ignore
right_scol = right_scol_for(label)
if how == 'right':
scol = right_scol
elif how == 'full':
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
label = tuple([label[0] + left_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
for label in right._internal.column_labels:
col = right._internal.column_name_for(label)
scol = right_scol_for(label)
if label in duplicate_columns:
if label in left_keys and label in right_keys: # type: ignore
continue
else:
col = col + right_suffix
scol = scol.alias(col)
label = tuple([label[0] + right_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
left_index_scols = self._internal.index_scols
right_index_scols = right._internal.index_scols
# Retain indices if they are used for joining
if left_index:
if right_index:
if how in ('inner', 'left'):
exprs.extend(left_index_scols)
index_map = self._internal.index_map
elif how == 'right':
exprs.extend(right_index_scols)
index_map = right._internal.index_map
else:
index_map = []
for (col, name), left_scol, right_scol in zip(self._internal.index_map,
left_index_scols,
right_index_scols):
scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
exprs.append(scol.alias(col))
index_map.append((col, name))
else:
exprs.extend(right_index_scols)
index_map = right._internal.index_map
elif right_index:
exprs.extend(left_index_scols)
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
internal = _InternalFrame(sdf=selected_columns,
index_map=index_map if index_map else None,
column_labels=column_labels,
column_scols=[scol_for(selected_columns, col)
for col in data_columns])
return DataFrame(internal)
def join(self, right: 'DataFrame',
on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame's index (or column if on is specified).
* right: use `right`'s index.
* outer: form union of `left` frame's index (or column if on is specified) with
`right`'s index, and sort it lexicographically.
* inner: form intersection of `left` frame's index (or column if on is specified)
with `right`'s index, preserving the order of the `left`'s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both kdf1
and kdf2. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right's index but we can use any column in the calling DataFrame. This method
preserves the original DataFrame's index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.index
Int64Index([0, 1, 2, 3], dtype='int64')
"""
if isinstance(right, ks.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: "
"{rename}".format(rename=common))
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise NotImplementedError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_scols
if len(index_scols) != len(other._internal.index_scols):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (self._sdf.select(index_scols)
.intersect(other._sdf.select(other._internal.index_scols))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df.sort_index()
A B
0 a d
1 b y
2 c e
If `other` contains None, the corresponding values are not updated in the original DataFrame.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.column_labels)
.intersection(set(other._internal.column_labels)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
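# For each overlapping column, pick between the old value and the '_new' value from `other`:
# overwrite=True prefers non-null new values, overwrite=False only fills nulls in the original.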
for column_labels in update_columns:
column_name = self._internal.column_name_for(column_labels)
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(update_sdf, other._internal.column_name_for(column_labels) + '_new')
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
sdf = update_sdf.select([scol_for(update_sdf, col)
for col in self._internal.columns] +
list(HIDDEN_COLUMNS))
internal = self._internal.copy(sdf=sdf,
column_scols=[scol_for(sdf, col)
for col in self._internal.data_columns])
self._internal = internal
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function by specifying the ``frac`` argument as a named argument.
You can use `random_state` for reproducibility. However, note that unlike pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends not only on the seed, but also on how the data is distributed across
machines and, to some extent, on network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
def astype(self, dtype) -> 'DataFrame':
"""
Cast a Koalas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire Koalas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert back to int64 type:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
applied = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.items():
if col_name in dtype:
applied.append(col.astype(dtype=dtype[col_name]))
else:
applied.append(col)
else:
for col_name, col in self.items():
applied.append(col.astype(dtype=dtype))
return DataFrame(self._internal.with_new_columns(applied))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
return self._apply_series_op(
lambda kser: kser.rename(tuple([prefix + i for i in kser._internal.column_labels[0]])))
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
return self._apply_series_op(
lambda kser: kser.rename(tuple([i + suffix for i in kser._internal.column_labels[0]])))
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
For multi-index columns:
>>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]
>>> df.describe() # doctest: +NORMALIZE_WHITESPACE
num
a b
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
>>> df[('num', 'b')].describe()
count 3.0
mean 5.0
std 1.0
min 4.0
25% 4.0
50% 5.0
75% 6.0
max 6.0
Name: (num, b), dtype: float64
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
column_labels = []
for label in self._internal.column_labels:
scol = self._internal.scol_for(label)
spark_type = self._internal.spark_type_for(label)
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(scol, F.lit(None))
.alias(self._internal.column_name_for(label)))
column_labels.append(label)
elif isinstance(spark_type, NumericType):
exprs.append(scol)
column_labels.append(label)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
sdf = sdf.replace("stddev", "std", subset='summary')
internal = _InternalFrame(sdf=sdf,
index_map=[('summary', None)],
column_labels=column_labels,
column_scols=[scol_for(sdf, self._internal.column_name_for(label))
for label in column_labels])
return DataFrame(internal).astype('float64')
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.column_labels
elif isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
sdf = self._sdf.drop(*HIDDEN_COLUMNS) \
.drop_duplicates(subset=[self._internal.column_name_for(label) for label in subset])
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by 'axis' to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name ('index', 'columns') or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
axis = validate_axis(axis)
if axis == 0:
index = labels
elif axis == 1:
columns = labels
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
if index is not None and not is_list_like(index):
raise TypeError("Index must be called with a collection of some kind, "
"%s was passed" % type(index))
if columns is not None and not is_list_like(columns):
raise TypeError("Columns must be called with a collection of some kind, "
"%s was passed" % type(columns))
df = self.copy()
if index is not None:
df = DataFrame(df._reindex_index(index))
if columns is not None:
df = DataFrame(df._reindex_columns(columns))
# Process missing values.
if fill_value is not None:
df = df.fillna(fill_value)
# Copy
if copy:
return df.copy()
else:
self._internal = df._internal
return self
def _reindex_index(self, index):
# When axis is index, we can mimic pandas' behavior with a right outer join.
assert len(self._internal.index_columns) <= 1, "Index should be single column or not set."
index_column = self._internal.index_columns[0]
kser = ks.Series(list(index))
labels = kser._internal._sdf.select(kser._scol.alias(index_column))
joined_df = self._sdf.drop(NATURAL_ORDER_COLUMN_NAME) \
.join(labels, on=index_column, how="right")
internal = self._internal.copy(sdf=joined_df)
return internal
def _reindex_columns(self, columns):
level = self._internal.column_labels_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError('Expected tuple, got {}'.format(type(col)))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError("shape (1,{}) doesn't match the shape (1,{})"
.format(len(col), level))
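# Reuse existing columns for labels that are already present; labels that are missing from
# the DataFrame become literal NaN columns.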
scols, labels = [], []
for label in label_columns:
if label in self._internal.column_labels:
scols.append(self._internal.scol_for(label))
else:
scols.append(F.lit(np.nan).alias(name_like_string(label)))
labels.append(label)
return self._internal.with_new_columns(scols, column_labels=labels)
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column. If None it uses `frame.columns.name` or
'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> df.melt(value_vars='A')
variable value
0 A a
1 A b
2 A c
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
column_labels = self._internal.column_labels
if id_vars is None:
id_vars = []
else:
if isinstance(id_vars, str):
id_vars = [(id_vars,)]
elif isinstance(id_vars, tuple):
if self._internal.column_labels_level == 1:
id_vars = [idv if isinstance(idv, tuple) else (idv,)
for idv in id_vars]
else:
raise ValueError('id_vars must be a list of tuples'
' when columns are a MultiIndex')
else:
id_vars = [idv if isinstance(idv, tuple) else (idv,)
for idv in id_vars]
non_existence_col = [idv for idv in id_vars if idv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col)
if nec not in raveled_column_labels]
if len(missing) != 0:
raise KeyError("The following 'id_vars' are not present"
" in the DataFrame: {}".format(missing))
else:
raise KeyError("None of {} are in the {}"
.format(non_existence_col, column_labels))
if value_vars is None:
value_vars = []
else:
if isinstance(value_vars, str):
value_vars = [(value_vars,)]
elif isinstance(value_vars, tuple):
if self._internal.column_labels_level == 1:
value_vars = [valv if isinstance(valv, tuple) else (valv,)
for valv in value_vars]
else:
raise ValueError('value_vars must be a list of tuples'
' when columns are a MultiIndex')
else:
value_vars = [valv if isinstance(valv, tuple) else (valv,)
for valv in value_vars]
non_existence_col = [valv for valv in value_vars if valv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col)
if nec not in raveled_column_labels]
if len(missing) != 0:
raise KeyError("The following 'value_vars' are not present"
" in the DataFrame: {}".format(missing))
else:
raise KeyError("None of {} are in the {}"
.format(non_existence_col, column_labels))
if len(value_vars) == 0:
value_vars = column_labels
column_labels = [label for label in column_labels if label not in id_vars]
sdf = self._sdf
if var_name is None:
if self._internal.column_label_names is not None:
var_name = self._internal.column_label_names
elif self._internal.column_labels_level == 1:
var_name = ['variable']
else:
var_name = ['variable_{}'.format(i)
for i in range(self._internal.column_labels_level)]
elif isinstance(var_name, str):
var_name = [var_name]
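# Melt is implemented by building an array of structs (one per value column, holding the
# variable name(s) and the value) and exploding that array into rows.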
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(c).alias(name) for c, name in zip(label, var_name)] +
[self._internal.scol_for(label).alias(value_name)])
) for label in column_labels if label in value_vars]))
columns = ([self._internal.scol_for(label).alias(name_like_string(label))
for label in id_vars] +
[F.col("pairs.%s" % name)
for name in var_name[:self._internal.column_labels_level]] +
[F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
Name: all, dtype: bool
Returns
-------
Series
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
col = self[label]._scol
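# min() over the boolean-cast column acts as a logical AND across rows; nulls are coalesced
# to True so they do not affect the result.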
all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_label_names is None
else (self._internal.column_label_names[i],))
internal = self._internal.copy(
sdf=sdf,
index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
for i in range(self._internal.column_labels_level)],
column_labels=None,
column_scols=[scol_for(sdf, value_column)],
column_label_names=None)
return DataFrame(internal)[value_column].rename("all")
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
Name: any, dtype: bool
Returns
-------
Series
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
col = self[label]._scol
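# max() over the boolean-cast column acts as a logical OR across rows; nulls are coalesced
# to False so they do not affect the result.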
all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_label_names is None
else (self._internal.column_label_names[i],))
internal = self._internal.copy(
sdf=sdf,
index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
for i in range(self._internal.column_labels_level)],
column_labels=None,
column_scols=[scol_for(sdf, value_column)],
column_label_names=None)
return DataFrame(internal)[value_column].rename("any")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps between ranks.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
return self._apply_series_op(lambda kser: kser.rank(method=method, ascending=ascending))
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
axis = validate_axis(axis, none_axis=1)
index_scols = self._internal.index_scols
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis == 0:
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
return DataFrame(self._internal.with_filter(col))
elif axis == 1:
return self[items]
elif like is not None:
if axis == 0:
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
return DataFrame(self._internal.with_filter(index_scols[0].contains(like)))
elif axis == 1:
column_labels = self._internal.column_labels
output_labels = [label for label in column_labels if any(like in i for i in label)]
return self[output_labels]
elif regex is not None:
if axis == 0:
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
return DataFrame(self._internal.with_filter(index_scols[0].rlike(regex)))
elif axis == 1:
column_labels = self._internal.column_labels
matcher = re.compile(regex)
output_labels = [label for label in column_labels
if any(matcher.search(i) is not None for i in label)]
return self[output_labels]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def rename(self,
mapper=None,
index=None,
columns=None,
axis='index',
inplace=False,
level=None,
errors='ignore'):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
will be left as-is. Extra labels listed don't throw an error.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to that axis' values.
Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
and `columns`.
index : dict-like or function
Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
columns : dict-like or function
Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
axis : int or str, default 'index'
Axis to target with mapper. Can be either the axis name ('index', 'columns') or
number (0, 1).
inplace : bool, default False
Whether to return a new DataFrame.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
contains labels that are not present in the Index being transformed. If 'ignore',
existing keys will be renamed and extra keys will be ignored.
Returns
-------
DataFrame with the renamed axis labels.
Raises
------
`KeyError`
If any of the labels is not found in the selected axis and "errors='raise'".
Examples
--------
>>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE
a c
0 1 4
1 2 5
2 3 6
>>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> def str_lower(s) -> str:
... return str.lower(s)
>>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE
a b
0 1 4
1 2 5
2 3 6
>>> def mul10(x) -> int:
... return x * 10
>>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
>>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
>>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE
x y
A B C D
0 1 2 3 4
1 5 6 7 8
>>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
>>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE
a b
x a 1 2
b 3 4
y c 5 6
d 7 8
"""
def gen_mapper_fn(mapper):
if isinstance(mapper, dict):
if len(mapper) == 0:
if errors == 'raise':
raise KeyError('Index includes label which is not in the `mapper`.')
else:
return DataFrame(self._internal)
type_set = set(map(lambda x: type(x), mapper.values()))
if len(type_set) > 1:
raise ValueError("Mapper dict should have the same value type.")
spark_return_type = as_spark_type(list(type_set)[0])
def mapper_fn(x):
if x in mapper:
return mapper[x]
else:
if errors == 'raise':
raise KeyError('Index includes value which is not in the `mapper`')
return x
elif callable(mapper):
spark_return_type = _infer_return_type(mapper).tpe
def mapper_fn(x):
return mapper(x)
else:
raise ValueError("`mapper` or `index` or `columns` should be "
"either dict-like or function type.")
return mapper_fn, spark_return_type
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
if mapper:
axis = validate_axis(axis)
if axis == 0:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper)
elif axis == 1:
columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper)
else:
raise ValueError("argument axis should be either the axis name "
"(‘index’, ‘columns’) or number (0, 1)")
else:
if index:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index)
if columns:
columns_mapper_fn, _ = gen_mapper_fn(columns)
if not index and not columns:
raise ValueError("Either `index` or `columns` should be provided.")
internal = self._internal
if index_mapper_fn:
# Rename index labels. If `level` is None, rename all index columns; otherwise only
# rename the index column of the corresponding level.
# Implement this by transforming the underlying spark dataframe,
# Example:
# suppose the kdf index column in underlying spark dataframe is "index_0", "index_1",
# if rename level 0 index labels, will do:
# ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
# if rename all index labels (`level` is None), then will do:
# ```
# kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
# .withColumn("index_1", mapper_fn_udf(col("index_1"))
# ```
index_columns = internal.index_columns
num_indices = len(index_columns)
if level:
if level < 0 or level >= num_indices:
raise ValueError("level should be an integer between [0, num_indices)")
def gen_new_index_column(level):
index_col_name = index_columns[level]
index_mapper_udf = pandas_udf(lambda s: s.map(index_mapper_fn),
returnType=index_mapper_ret_stype)
return index_mapper_udf(scol_for(internal.sdf, index_col_name))
sdf = internal.sdf
if level is None:
for i in range(num_indices):
sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i))
else:
sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level))
internal = internal.copy(sdf=sdf)
if columns_mapper_fn:
# Rename column names.
# This modifies `_internal._column_labels` and renames the underlying spark dataframe
# columns to match the new `_internal._column_labels`.
if level:
if level < 0 or level >= internal.column_labels_level:
raise ValueError("level should be an integer between [0, column_labels_level)")
def gen_new_column_labels_entry(column_labels_entry):
if isinstance(column_labels_entry, tuple):
if level is None:
# rename all level columns
return tuple(map(columns_mapper_fn, column_labels_entry))
else:
# only rename specified level column
entry_list = list(column_labels_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
else:
return columns_mapper_fn(column_labels_entry)
new_column_labels = list(map(gen_new_column_labels_entry, internal.column_labels))
if internal.column_labels_level == 1:
new_data_columns = [col[0] for col in new_column_labels]
else:
new_data_columns = [str(col) for col in new_column_labels]
new_data_scols = [scol_for(internal.sdf, old_col_name).alias(new_col_name)
for old_col_name, new_col_name
in zip(internal.data_columns, new_data_columns)]
internal = internal.with_new_columns(new_data_scols, column_labels=new_column_labels)
if inplace:
self._internal = internal
return self
else:
return DataFrame(internal)
def keys(self):
"""
Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object')
"""
return self.columns
def pct_change(self, periods=1):
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
DataFrame
Examples
--------
Percentage change in French franc, Deutsche Mark, and Italian lira
from 1980-01-01 to 1980-03-01.
>>> df = ks.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
You can set periods to shift for forming percent change
>>> df.pct_change(2)
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 NaN NaN NaN
1980-03-01 0.067912 0.073814 0.06883
"""
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
def op(kser):
prev_row = F.lag(kser._scol, periods).over(window)
return ((kser._scol - prev_row) / prev_row).alias(kser._internal.data_columns[0])
return self._apply_series_op(op)
# TODO: axis = 1
def idxmax(self, axis=0):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the maximum value using `to_pandas()`
because we assume the number of rows with max values is usually small in general.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmax
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmax()
a 2
b 0
c 2
Name: 0, dtype: int64
For Multi-column Index
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmax().sort_index()
a x 2
b y 0
c z 2
Name: 0, dtype: int64
"""
max_cols = map(lambda scol: F.max(scol), self._internal.column_scols)
sdf_max = self._sdf.select(*max_cols).head()
# `sdf_max` looks like below
# +------+------+------+
# |(a, x)|(b, y)|(c, z)|
# +------+------+------+
# | 3| 4.0| 400|
# +------+------+------+
conds = (scol == max_val for scol, max_val in zip(self._internal.column_scols, sdf_max))
cond = reduce(lambda x, y: x | y, conds)
kdf = DataFrame(self._internal.with_filter(cond))
pdf = kdf.to_pandas()
return ks.from_pandas(pdf.idxmax())
# TODO: axis = 1
def idxmin(self, axis=0):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the minimum value using `to_pandas()`
because we assume the number of rows with min values is usually small in general.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmin
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmin()
a 0
b 3
c 1
Name: 0, dtype: int64
For Multi-column Index
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmin().sort_index()
a x 0
b y 3
c z 1
Name: 0, dtype: int64
"""
min_cols = map(lambda scol: F.min(scol), self._internal.column_scols)
sdf_min = self._sdf.select(*min_cols).head()
conds = (scol == min_val for scol, min_val in zip(self._internal.column_scols, sdf_min))
cond = reduce(lambda x, y: x | y, conds)
kdf = DataFrame(self._internal.with_filter(cond))
pdf = kdf.to_pandas()
return ks.from_pandas(pdf.idxmin())
def info(
self, verbose=None, buf=None, max_cols=None, null_counts=None
):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used.
null_counts : bool, optional
Whether to show the non-null counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = ks.DataFrame(
... {"int_col": int_values, "text_col": text_values, "float_col": float_values},
... columns=['int_col', 'text_col', 'float_col'])
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True) # doctest: +SKIP
<class 'databricks.koalas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False) # doctest: +SKIP
<class 'databricks.koalas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open('%s/info.txt' % path, "w",
... encoding="utf-8") as f:
... _ = f.write(s)
>>> with open('%s/info.txt' % path) as f:
... f.readlines() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
[...databricks.koalas.frame.DataFrame...,
'Index: 5 entries, 0 to 4\\n',
'Data columns (total 3 columns):\\n',
'int_col 5 non-null int64\\n',
'text_col 5 non-null object\\n',
'float_col 5 non-null float64\\n',
'dtypes: float64(1), int64(1), object(1)']
"""
# To avoid pandas' existing config affects Koalas.
# TODO: should we have corresponding Koalas configs?
with pd.option_context(
'display.max_info_columns', sys.maxsize,
'display.max_info_rows', sys.maxsize):
try:
self._data = self # hack to use pandas' info as is.
return pd.DataFrame.info(
self, verbose=verbose, buf=buf, max_cols=max_cols,
memory_usage=False, null_counts=null_counts)
finally:
del self._data
# TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'
def quantile(self, q=0.5, axis=0, numeric_only=True, accuracy=10000):
"""
Return value at the given quantile.
.. note:: Unlike pandas', the quantile in Koalas is an approximate quantile based upon
approximate percentile computation, because computing quantiles across a large dataset
is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be computed as well.
Can only be set to True at the moment.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})
>>> kdf
a b
0 1 6
1 2 7
2 3 8
3 4 9
4 5 0
>>> kdf.quantile(.5)
a 3
b 7
Name: 0.5, dtype: int64
>>> kdf.quantile([.25, .5, .75])
a b
0.25 2 6
0.5 3 7
0.75 4 8
"""
result_as_series = False
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if numeric_only is not True:
raise NotImplementedError("quantile currently doesn't supports numeric_only")
if isinstance(q, float):
result_as_series = True
key = str(q)
q = (q,)
quantiles = q
# First calculate the percentiles for all columns and map them to each of the `quantiles`
# by creating each entry as a struct. So, it becomes an array of structs as below:
#
# +-----------------------------------------+
# | arrays|
# +-----------------------------------------+
# |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|
# +-----------------------------------------+
sdf = self._sdf
args = ", ".join(map(str, quantiles))
percentile_cols = []
for column in self._internal.data_columns:
percentile_cols.append(F.expr(
"approx_percentile(`%s`, array(%s), %s)" % (column, args, accuracy))
.alias(column))
sdf = sdf.select(percentile_cols)
# Here, after selecting the percentile cols, the sdf looks like below:
# +---------+---------+
# | a| b|
# +---------+---------+
# |[2, 3, 4]|[6, 7, 8]|
# +---------+---------+
cols_dict = OrderedDict()
for column in self._internal.data_columns:
cols_dict[column] = list()
for i in range(len(quantiles)):
cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column))
internal_index_column = SPARK_DEFAULT_INDEX_NAME
cols = []
for i, col in enumerate(zip(*cols_dict.values())):
cols.append(F.struct(
F.lit("%s" % quantiles[i]).alias(internal_index_column),
*col))
sdf = sdf.select(F.array(*cols).alias("arrays"))
# And then, explode it and manually set the index.
# +-----------------+---+---+
# |__index_level_0__| a| b|
# +-----------------+---+---+
# | 0.25| 2| 6|
# | 0.5| 3| 7|
# | 0.75| 4| 8|
# +-----------------+---+---+
sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*")
internal = self._internal.copy(
sdf=sdf,
column_scols=[scol_for(sdf, col) for col in self._internal.data_columns],
index_map=[(internal_index_column, None)],
column_labels=self._internal.column_labels,
column_label_names=None)
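# For a scalar q, transpose the one-row result and select the column named str(q)
# so a Series is returned instead of a DataFrame.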
return DataFrame(internal) if not result_as_series else DataFrame(internal).T[key]
def query(self, expr, inplace=False):
"""
Query the columns of a DataFrame with a boolean expression.
.. note::
* Using variables in the environment with `@` is not supported.
* Internal columns starting with `__` can be accessed;
however, they are not supposed to be accessed.
* This delegates to Spark SQL, so the syntax follows Spark SQL.
* If you want exactly the same syntax as pandas', you can work around
by using :meth:`DataFrame.map_in_pandas`. See the example below.
>>> df = ks.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> num = 1
>>> df.map_in_pandas(lambda pdf: pdf.query('A > @num')) # doctest: +SKIP
A B
1 3 4
2 5 6
Parameters
----------
expr : str
The query string to evaluate.
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
Examples
--------
>>> df = ks.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
if isinstance(self.columns, pd.MultiIndex):
raise ValueError("Doesn't support for MultiIndex columns")
if not isinstance(expr, str):
raise ValueError(
'expr must be a string to be evaluated, {} given'
.format(type(expr)))
if not isinstance(inplace, bool):
raise ValueError(
'For argument "inplace" expected type bool, received type {}.'
.format(type(inplace).__name__))
sdf = self._sdf.filter(expr)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def explain(self, extended: bool = False):
"""
Prints the underlying (logical and physical) Spark plans to the console for debugging
purpose.
Parameters
----------
extended : boolean, default ``False``.
If ``False``, prints only the physical plan.
Examples
--------
>>> df = ks.DataFrame({'id': range(10)})
>>> df.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
>>> df.explain(True) # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
self._internal.spark_internal_df.explain(extended)
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.pandas_df
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
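# Fetch one row more than the display limit so we can tell whether the output
# was truncated and a footer needs to be appended.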
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
# pandas 0.25.1 has a regression about HTML representation so 'bold_rows'
# has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204
bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__))
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows)
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True, bold_rows=bold_rows)
def __getitem__(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, (str, tuple, list)):
return self.loc[:, key]
elif isinstance(key, slice):
return self.loc[key]
elif isinstance(key, Series):
return self.loc[key.astype(bool)]
raise NotImplementedError(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
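# Three cases below: a value from a different DataFrame/Series is first aligned with
# this frame; a list key assigns multiple columns from the same frame; otherwise a
# single column is assigned.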
if (isinstance(value, Series) and value._kdf is not self) or \
(isinstance(value, DataFrame) and value is not self):
# Different Series or DataFrames
if isinstance(value, Series):
value = value.to_frame()
else:
assert isinstance(value, DataFrame), type(value)
value = value.copy()
level = self._internal.column_labels_level
value.columns = pd.MultiIndex.from_tuples(
[tuple([name_like_string(label)] + ([''] * (level - 1)))
for label in value._internal.column_labels])
if isinstance(key, str):
key = [(key,)]
elif isinstance(key, tuple):
key = [key]
else:
key = [k if isinstance(k, tuple) else (k,) for k in key]
if any(len(label) > level for label in key):
raise KeyError('Key length ({}) exceeds index depth ({})'
.format(max(len(label) for label in key), level))
key = [tuple(list(label) + ([''] * (level - len(label)))) for label in key]
def assign_columns(kdf, this_column_labels, that_column_labels):
assert len(key) == len(that_column_labels)
# Note that `zip_longest` is intentionally used here to combine
# that_columns.
for k, this_label, that_label \
in zip_longest(key, this_column_labels, that_column_labels):
yield (kdf[that_label], tuple(['that', *k]))
if this_label is not None and this_label[1:] != k:
yield (kdf[this_label], this_label)
kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(key, list):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
kdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
kdf = self._assign({key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
if key.startswith("__"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self.loc[:, key]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
def __iter__(self):
return iter(self.columns)
# NDArray Compat
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
# TODO: is it possible to deduplicate it with '_map_series_op'?
if (all(isinstance(inp, DataFrame) for inp in inputs)
and any(inp is not inputs[0] for inp in inputs)):
# binary only
assert len(inputs) == 2
this = inputs[0]
that = inputs[1]
if this._internal.column_labels_level != that._internal.column_labels_level:
raise ValueError('cannot join with no overlapping index names')
# Different DataFrames
def apply_op(kdf, this_column_labels, that_column_labels):
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (ufunc(kdf[this_label], kdf[that_label], **kwargs), this_label)
return align_diff_frames(apply_op, this, that, fillna=True, how="full")
else:
# DataFrame and Series
applied = []
this = inputs[0]
assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))
for label in this._internal.column_labels:
arguments = []
for inp in inputs:
arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)
# both binary and unary.
applied.append(ufunc(*arguments, **kwargs))
internal = this._internal.with_new_columns(applied)
return DataFrame(internal)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
# we always wraps the given type hints by a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
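# Mark the underlying Spark DataFrame for caching; it stays cached until
# unpersist() is called or the `with` block exits.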
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
is not used with a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 14,300 | If we support multi-index column later, we need to rename to fit the pandas' requirement. | databricks-koalas | py |
@@ -0,0 +1,11 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+// values := JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{} | 1 | 1 | 8,547 | I'm not sure this is really needed. JSONValue type can be used the same as a map as far as operators go, including range. I'd leave this out for now unless there is a strong reason to keep it. | aws-aws-sdk-go | go |
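A minimal sketch of the reviewer's point (the `aws` package reference and the `fmt` import are assumed for illustration): JSONValue already behaves like an ordinary map, including `range`:

	values := aws.JSONValue{"Foo": "Bar"}
	values["Baz"] = "Qux"
	for k, v := range values {
		fmt.Println(k, v)
	}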
|
@@ -724,6 +724,7 @@ void Container::internalAddThing(uint32_t, Thing* thing)
void Container::startDecaying()
{
+ Item::startDecaying();
for (Item* item : itemlist) {
item->startDecaying();
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "container.h"
#include "iomap.h"
#include "game.h"
extern Game g_game;
Container::Container(uint16_t type) :
Container(type, items[type].maxItems) {}
Container::Container(uint16_t type, uint16_t size, bool unlocked /*= true*/, bool pagination /*= false*/) :
Item(type),
maxSize(size),
unlocked(unlocked),
pagination(pagination)
{}
Container::Container(Tile* tile) : Container(ITEM_BROWSEFIELD, 30, false, true)
{
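// Browse-field container: wrap the tile's containers and movable items (except items
// with a unique id) so the tile can be browsed like a paginated 30-slot container.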
TileItemVector* itemVector = tile->getItemList();
if (itemVector) {
for (Item* item : *itemVector) {
if ((item->getContainer() || item->hasProperty(CONST_PROP_MOVEABLE)) && !item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
itemlist.push_front(item);
item->setParent(this);
}
}
}
setParent(tile);
}
Container::~Container()
{
if (getID() == ITEM_BROWSEFIELD) {
g_game.browseFields.erase(getTile());
for (Item* item : itemlist) {
item->setParent(parent);
}
} else {
for (Item* item : itemlist) {
item->setParent(nullptr);
item->decrementReferenceCounter();
}
}
}
Item* Container::clone() const
{
Container* clone = static_cast<Container*>(Item::clone());
for (Item* item : itemlist) {
clone->addItem(item->clone());
}
clone->totalWeight = totalWeight;
return clone;
}
Container* Container::getParentContainer()
{
Thing* thing = getParent();
if (!thing) {
return nullptr;
}
return thing->getContainer();
}
std::string Container::getName(bool addArticle /* = false*/) const {
const ItemType& it = items[id];
return getNameDescription(it, this, -1, addArticle);
}
bool Container::hasParent() const
{
return getID() != ITEM_BROWSEFIELD && dynamic_cast<const Player*>(getParent()) == nullptr;
}
void Container::addItem(Item* item)
{
itemlist.push_back(item);
item->setParent(this);
}
Attr_ReadValue Container::readAttr(AttrTypes_t attr, PropStream& propStream)
{
if (attr == ATTR_CONTAINER_ITEMS) {
if (!propStream.read<uint32_t>(serializationCount)) {
return ATTR_READ_ERROR;
}
return ATTR_READ_END;
}
return Item::readAttr(attr, propStream);
}
bool Container::unserializeItemNode(OTB::Loader& loader, const OTB::Node& node, PropStream& propStream)
{
bool ret = Item::unserializeItemNode(loader, node, propStream);
if (!ret) {
return false;
}
for (auto& itemNode : node.children) {
//load container items
if (itemNode.type != OTBM_ITEM) {
// unknown type
return false;
}
PropStream itemPropStream;
if (!loader.getProps(itemNode, itemPropStream)) {
return false;
}
Item* item = Item::CreateItem(itemPropStream);
if (!item) {
return false;
}
if (!item->unserializeItemNode(loader, itemNode, itemPropStream)) {
return false;
}
addItem(item);
updateItemWeight(item->getWeight());
}
return true;
}
void Container::updateItemWeight(int32_t diff)
{
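// Propagate the weight change up the chain of parent containers so every cached
// totalWeight stays consistent.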
totalWeight += diff;
if (Container* parentContainer = getParentContainer()) {
parentContainer->updateItemWeight(diff);
}
}
uint32_t Container::getWeight() const
{
return Item::getWeight() + totalWeight;
}
std::string Container::getContentDescription() const
{
std::ostringstream os;
return getContentDescription(os).str();
}
std::ostringstream& Container::getContentDescription(std::ostringstream& os) const
{
bool firstitem = true;
for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) {
Item* item = *it;
Container* container = item->getContainer();
if (container && !container->empty()) {
continue;
}
if (firstitem) {
firstitem = false;
} else {
os << ", ";
}
os << item->getNameDescription();
}
if (firstitem) {
os << "nothing";
}
return os;
}
Item* Container::getItemByIndex(size_t index) const
{
if (index >= size()) {
return nullptr;
}
return itemlist[index];
}
uint32_t Container::getItemHoldingCount() const
{
uint32_t counter = 0;
for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) {
++counter;
}
return counter;
}
bool Container::isHoldingItem(const Item* item) const
{
for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) {
if (*it == item) {
return true;
}
}
return false;
}
void Container::onAddContainerItem(Item* item)
{
SpectatorVec spectators;
g_game.map.getSpectators(spectators, getPosition(), false, true, 1, 1, 1, 1);
//send to client
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendAddContainerItem(this, item);
}
//event methods
for (Creature* spectator : spectators) {
spectator->getPlayer()->onAddContainerItem(item);
}
}
void Container::onUpdateContainerItem(uint32_t index, Item* oldItem, Item* newItem)
{
SpectatorVec spectators;
g_game.map.getSpectators(spectators, getPosition(), false, true, 1, 1, 1, 1);
//send to client
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendUpdateContainerItem(this, index, newItem);
}
//event methods
for (Creature* spectator : spectators) {
spectator->getPlayer()->onUpdateContainerItem(this, oldItem, newItem);
}
}
void Container::onRemoveContainerItem(uint32_t index, Item* item)
{
SpectatorVec spectators;
g_game.map.getSpectators(spectators, getPosition(), false, true, 1, 1, 1, 1);
//send change to client
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendRemoveContainerItem(this, index);
}
//event methods
for (Creature* spectator : spectators) {
spectator->getPlayer()->onRemoveContainerItem(this, item);
}
}
ReturnValue Container::queryAdd(int32_t index, const Thing& thing, uint32_t count,
uint32_t flags, Creature* actor/* = nullptr*/) const
{
bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags);
if (childIsOwner) {
//a child container is querying, since we are the top container (not carried by a player)
//just return with no error.
return RETURNVALUE_NOERROR;
}
if (!unlocked) {
return RETURNVALUE_NOTPOSSIBLE;
}
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!item->isPickupable()) {
return RETURNVALUE_CANNOTPICKUP;
}
if (item == this) {
return RETURNVALUE_THISISIMPOSSIBLE;
}
// store items can be only moved into depot chest or store inbox
if (item->isStoreItem() && !dynamic_cast<const DepotChest*>(this)) {
return RETURNVALUE_ITEMCANNOTBEMOVEDTHERE;
}
const Cylinder* cylinder = getParent();
// don't allow moving items into container that is store item and is in store inbox
if (isStoreItem() && dynamic_cast<const StoreInbox*>(cylinder)) {
ReturnValue ret = RETURNVALUE_ITEMCANNOTBEMOVEDTHERE;
if (!item->isStoreItem()) {
ret = RETURNVALUE_CANNOTMOVEITEMISNOTSTOREITEM;
}
return ret;
}
if (!hasBitSet(FLAG_NOLIMIT, flags)) {
while (cylinder) {
if (cylinder == &thing) {
return RETURNVALUE_THISISIMPOSSIBLE;
}
if (dynamic_cast<const Inbox*>(cylinder)) {
return RETURNVALUE_CONTAINERNOTENOUGHROOM;
}
cylinder = cylinder->getParent();
}
if (index == INDEX_WHEREEVER && size() >= capacity()) {
return RETURNVALUE_CONTAINERNOTENOUGHROOM;
}
} else {
while (cylinder) {
if (cylinder == &thing) {
return RETURNVALUE_THISISIMPOSSIBLE;
}
cylinder = cylinder->getParent();
}
}
const Cylinder* topParent = getTopParent();
if (topParent != this) {
return topParent->queryAdd(INDEX_WHEREEVER, *item, count, flags | FLAG_CHILDISOWNER, actor);
} else {
return RETURNVALUE_NOERROR;
}
}
ReturnValue Container::queryMaxCount(int32_t index, const Thing& thing, uint32_t count,
uint32_t& maxQueryCount, uint32_t flags) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
maxQueryCount = 0;
return RETURNVALUE_NOTPOSSIBLE;
}
if (hasBitSet(FLAG_NOLIMIT, flags)) {
maxQueryCount = std::max<uint32_t>(1, count);
return RETURNVALUE_NOERROR;
}
int32_t freeSlots = std::max<int32_t>(capacity() - size(), 0);
if (item->isStackable()) {
uint32_t n = 0;
if (index == INDEX_WHEREEVER) {
//Iterate through every item and check how many free stackable slots there are.
uint32_t slotIndex = 0;
for (Item* containerItem : itemlist) {
if (containerItem != item && containerItem->equals(item) && containerItem->getItemCount() < 100) {
if (queryAdd(slotIndex++, *item, count, flags) == RETURNVALUE_NOERROR) {
n += 100 - containerItem->getItemCount();
}
}
}
} else {
const Item* destItem = getItemByIndex(index);
if (item->equals(destItem) && destItem->getItemCount() < 100) {
if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) {
n = 100 - destItem->getItemCount();
}
}
}
maxQueryCount = freeSlots * 100 + n;
if (maxQueryCount < count) {
return RETURNVALUE_CONTAINERNOTENOUGHROOM;
}
} else {
maxQueryCount = freeSlots;
if (maxQueryCount == 0) {
return RETURNVALUE_CONTAINERNOTENOUGHROOM;
}
}
return RETURNVALUE_NOERROR;
}
ReturnValue Container::queryRemove(const Thing& thing, uint32_t count, uint32_t flags, Creature* actor /*= nullptr */) const
{
int32_t index = getThingIndex(&thing);
if (index == -1) {
return RETURNVALUE_NOTPOSSIBLE;
}
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (count == 0 || (item->isStackable() && count > item->getItemCount())) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) {
return RETURNVALUE_NOTMOVEABLE;
}
const HouseTile* houseTile = dynamic_cast<const HouseTile*>(getTopParent());
if (houseTile) {
return houseTile->queryRemove(thing, count, flags, actor);
}
return RETURNVALUE_NOERROR;
}
Cylinder* Container::queryDestination(int32_t& index, const Thing& thing, Item** destItem,
uint32_t& flags)
{
if (!unlocked) {
*destItem = nullptr;
return this;
}
if (index == 254 /*move up*/) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
Container* parentContainer = dynamic_cast<Container*>(getParent());
if (parentContainer) {
return parentContainer;
}
return this;
}
if (index == 255 /*add wherever*/) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
} else if (index >= static_cast<int32_t>(capacity())) {
/*
if you have a container maximized to show all 20 slots
and then open a bag that is inside the container, you will have a bag with 8 slots
and a "grey" area where the other 12 slots from the container were;
if you drop the item on that grey area,
the client calculates the slot position as if the bag had 20 slots
*/
index = INDEX_WHEREEVER;
*destItem = nullptr;
}
const Item* item = thing.getItem();
if (!item) {
return this;
}
if (index != INDEX_WHEREEVER) {
Item* itemFromIndex = getItemByIndex(index);
if (itemFromIndex) {
*destItem = itemFromIndex;
}
Cylinder* subCylinder = dynamic_cast<Cylinder*>(*destItem);
if (subCylinder) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
return subCylinder;
}
}
bool autoStack = !hasBitSet(FLAG_IGNOREAUTOSTACK, flags);
if (autoStack && item->isStackable() && item->getParent() != this) {
if (*destItem && (*destItem)->equals(item) && (*destItem)->getItemCount() < 100) {
return this;
}
//try find a suitable item to stack with
uint32_t n = 0;
for (Item* listItem : itemlist) {
if (listItem != item && listItem->equals(item) && listItem->getItemCount() < 100) {
*destItem = listItem;
index = n;
return this;
}
++n;
}
}
return this;
}
void Container::addThing(Thing* thing)
{
return addThing(0, thing);
}
void Container::addThing(int32_t index, Thing* thing)
{
if (index >= static_cast<int32_t>(capacity())) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (item == nullptr) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setParent(this);
itemlist.push_front(item);
updateItemWeight(item->getWeight());
//send change to client
if (getParent() && (getParent() != VirtualCylinder::virtualCylinder)) {
onAddContainerItem(item);
}
}
void Container::addItemBack(Item* item)
{
addItem(item);
updateItemWeight(item->getWeight());
//send change to client
if (getParent() && (getParent() != VirtualCylinder::virtualCylinder)) {
onAddContainerItem(item);
}
}
void Container::updateThing(Thing* thing, uint16_t itemId, uint32_t count)
{
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (item == nullptr) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
const int32_t oldWeight = item->getWeight();
item->setID(itemId);
item->setSubType(count);
updateItemWeight(-oldWeight + item->getWeight());
//send change to client
if (getParent()) {
onUpdateContainerItem(index, item, item);
}
}
void Container::replaceThing(uint32_t index, Thing* thing)
{
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* replacedItem = getItemByIndex(index);
if (!replacedItem) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
itemlist[index] = item;
item->setParent(this);
updateItemWeight(-static_cast<int32_t>(replacedItem->getWeight()) + item->getWeight());
//send change to client
if (getParent()) {
onUpdateContainerItem(index, replacedItem, item);
}
replacedItem->setParent(nullptr);
}
void Container::removeThing(Thing* thing, uint32_t count)
{
Item* item = thing->getItem();
if (item == nullptr) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
if (item->isStackable() && count != item->getItemCount()) {
uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count));
const int32_t oldWeight = item->getWeight();
item->setItemCount(newCount);
updateItemWeight(-oldWeight + item->getWeight());
//send change to client
if (getParent()) {
onUpdateContainerItem(index, item, item);
}
} else {
updateItemWeight(-static_cast<int32_t>(item->getWeight()));
//send change to client
if (getParent()) {
onRemoveContainerItem(index, item);
}
item->setParent(nullptr);
itemlist.erase(itemlist.begin() + index);
}
}
int32_t Container::getThingIndex(const Thing* thing) const
{
int32_t index = 0;
for (Item* item : itemlist) {
if (item == thing) {
return index;
}
++index;
}
return -1;
}
size_t Container::getFirstIndex() const
{
return 0;
}
size_t Container::getLastIndex() const
{
return size();
}
uint32_t Container::getItemTypeCount(uint16_t itemId, int32_t subType/* = -1*/) const
{
uint32_t count = 0;
for (Item* item : itemlist) {
if (item->getID() == itemId) {
count += countByType(item, subType);
}
}
return count;
}
std::map<uint32_t, uint32_t>& Container::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const
{
for (Item* item : itemlist) {
countMap[item->getID()] += item->getItemCount();
}
return countMap;
}
ItemVector Container::getItems(bool recursive /*= false*/)
{
ItemVector containerItems;
if (recursive) {
for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) {
containerItems.push_back(*it);
}
} else {
for (Item* item : itemlist) {
containerItems.push_back(item);
}
}
return containerItems;
}
Thing* Container::getThing(size_t index) const
{
return getItemByIndex(index);
}
void Container::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t)
{
Cylinder* topParent = getTopParent();
if (topParent->getCreature()) {
topParent->postAddNotification(thing, oldParent, index, LINK_TOPPARENT);
} else if (topParent == this) {
//let the tile class notify surrounding players
if (topParent->getParent()) {
topParent->getParent()->postAddNotification(thing, oldParent, index, LINK_NEAR);
}
} else {
topParent->postAddNotification(thing, oldParent, index, LINK_PARENT);
}
}
void Container::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t)
{
Cylinder* topParent = getTopParent();
if (topParent->getCreature()) {
topParent->postRemoveNotification(thing, newParent, index, LINK_TOPPARENT);
} else if (topParent == this) {
//let the tile class notify surrounding players
if (topParent->getParent()) {
topParent->getParent()->postRemoveNotification(thing, newParent, index, LINK_NEAR);
}
} else {
topParent->postRemoveNotification(thing, newParent, index, LINK_PARENT);
}
}
void Container::internalAddThing(Thing* thing)
{
internalAddThing(0, thing);
}
void Container::internalAddThing(uint32_t, Thing* thing)
{
Item* item = thing->getItem();
if (item == nullptr) {
return;
}
item->setParent(this);
itemlist.push_front(item);
updateItemWeight(item->getWeight());
}
void Container::startDecaying()
{
for (Item* item : itemlist) {
item->startDecaying();
}
}
ContainerIterator Container::iterator() const
{
ContainerIterator cit;
if (!itemlist.empty()) {
cit.over.push_back(this);
cit.cur = itemlist.begin();
}
return cit;
}
Item* ContainerIterator::operator*()
{
return *cur;
}
void ContainerIterator::advance()
{
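// Walk the queue of open containers: a non-empty child container found at the current
// item is queued as well, then the iterator moves on; once the current container is
// exhausted it is popped and iteration resumes at the next queued container.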
if (Item* i = *cur) {
if (Container* c = i->getContainer()) {
if (!c->empty()) {
over.push_back(c);
}
}
}
++cur;
if (cur == over.front()->itemlist.end()) {
over.pop_front();
if (!over.empty()) {
cur = over.front()->itemlist.begin();
}
}
}
| 1 | 19,909 | sorry for nitpicking but I would love a new line under this line | otland-forgottenserver | cpp |
@@ -21,5 +21,5 @@ package node
type OptionsTransactor struct {
TransactorEndpointAddress string
RegistryAddress string
- AccountantID string
+ ChannelImplementation string
} | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package node
// OptionsTransactor describes possible parameters for interaction with transactor
type OptionsTransactor struct {
TransactorEndpointAddress string
RegistryAddress string
AccountantID string
}
| 1 | 15,021 | ChannelImplementation field naming is not clear, is it some kind of standard? Maybe it can be named ChannelID? | mysteriumnetwork-node | go |
@@ -67,4 +67,11 @@ public interface ActionsProvider {
default ExpireSnapshots expireSnapshots(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
}
+
+ /**
+ * Instantiates an action to remove all the files referenced by given metadata location.
+ */
+ default RemoveFiles removeFiles(String metadataLocation) {
+ throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import org.apache.iceberg.Table;
/**
* An API that should be implemented by query engine integrations for providing actions.
*/
public interface ActionsProvider {
/**
* Instantiates an action to snapshot an existing table as a new Iceberg table.
*/
default SnapshotTable snapshotTable(String sourceTableIdent) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement snapshotTable");
}
/**
* Instantiates an action to migrate an existing table to Iceberg.
*/
default MigrateTable migrateTable(String tableIdent) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement migrateTable");
}
/**
* Instantiates an action to remove orphan files.
*/
default RemoveOrphanFiles removeOrphanFiles(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement removeOrphanFiles");
}
/**
* Instantiates an action to rewrite manifests.
*/
default RewriteManifests rewriteManifests(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement rewriteManifests");
}
/**
* Instantiates an action to rewrite data files.
*/
default RewriteDataFiles rewriteDataFiles(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement rewriteDataFiles");
}
/**
* Instantiates an action to expire snapshots.
*/
default ExpireSnapshots expireSnapshots(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
}
}
| 1 | 35,820 | Looks like there is a typo: `expireSnapshots` -> `removeFiles` or whatever name we go with. | apache-iceberg | java |
@@ -31,7 +31,7 @@ public abstract class IntraFeedSortDialog {
int idxCurrentSort = -1;
for (int i = 0; i < values.length; i++) {
- if (currentSortOrder == values[i]) {
+ if (currentSortOrder == values[i] || currentSortOrder == null) {
idxCurrentSort = i;
break;
} | 1 | package de.danoeh.antennapod.dialog;
import android.content.Context;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.app.AlertDialog;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.util.SortOrder;
public abstract class IntraFeedSortDialog {
@Nullable
protected SortOrder currentSortOrder;
@NonNull
protected Context context;
public IntraFeedSortDialog(@NonNull Context context, @Nullable SortOrder sortOrder) {
this.context = context;
this.currentSortOrder = sortOrder;
}
public void openDialog() {
final String[] items = context.getResources().getStringArray(R.array.feed_episodes_sort_options);
final String[] valueStrs = context.getResources().getStringArray(R.array.feed_episodes_sort_values);
final SortOrder[] values = new SortOrder[valueStrs.length];
for (int i = 0; i < valueStrs.length; i++) {
values[i] = SortOrder.valueOf(valueStrs[i]);
}
int idxCurrentSort = -1;
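// Pre-select the entry that matches the current sort order; -1 leaves nothing checked
// when there is no match.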
for (int i = 0; i < values.length; i++) {
if (currentSortOrder == values[i]) {
idxCurrentSort = i;
break;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.sort);
builder.setSingleChoiceItems(items, idxCurrentSort, (dialog, idxNewSort) -> {
updateSort(values[idxNewSort]);
dialog.dismiss();
});
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
protected abstract void updateSort(@NonNull SortOrder sortOrder);
}
| 1 | 15,917 | Thanks for looking into this. I think it looks a bit strange to have this check inside the for loop. Wouldn't it also work to initialize `idxCurrentSort` with 0 instead? | AntennaPod-AntennaPod | java |
@@ -72,6 +72,8 @@ public class Notification {
public static final int OMNIPOD_POD_NOT_ATTACHED = 59;
public static final int CARBS_REQUIRED = 60;
public static final int IMPORTANCE_HIGH = 2;
+ public static final int OMNIPOD_POD_SUSPENDED = 61;
+ public static final int OMNIPOD_POD_ALERTS_UPDATED = 62;
public static final String CATEGORY_ALARM = "alarm";
| 1 | package info.nightscout.androidaps.plugins.general.overview.notifications;
import androidx.annotation.NonNull;
import info.nightscout.androidaps.utils.T;
public class Notification {
// TODO join with NotificationWithAction after change to enums
public static final int URGENT = 0;
public static final int NORMAL = 1;
public static final int LOW = 2;
public static final int INFO = 3;
public static final int ANNOUNCEMENT = 4;
public static final int PROFILE_SET_FAILED = 0;
public static final int PROFILE_SET_OK = 1;
public static final int EASYMODE_ENABLED = 2;
public static final int EXTENDED_BOLUS_DISABLED = 3;
public static final int UD_MODE_ENABLED = 4;
public static final int PROFILE_NOT_SET_NOT_INITIALIZED = 5;
public static final int FAILED_UDPATE_PROFILE = 6;
public static final int BASAL_VALUE_BELOW_MINIMUM = 7;
public static final int OLD_NS = 9;
public static final int INVALID_PHONE_NUMBER = 10;
public static final int INVALID_MESSAGE_BODY = 11;
public static final int APPROACHING_DAILY_LIMIT = 12;
public static final int NSCLIENT_NO_WRITE_PERMISSION = 13;
public static final int MISSING_SMS_PERMISSION = 14;
public static final int PUMPERROR = 15;
public static final int WRONGSERIALNUMBER = 16;
public static final int NSANNOUNCEMENT = 18;
public static final int NSALARM = 19;
public static final int NSURGENTALARM = 20;
public static final int SHORT_DIA = 21;
public static final int TOAST_ALARM = 22;
public static final int WRONGBASALSTEP = 23;
public static final int WRONG_DRIVER = 24;
public static final int COMBO_PUMP_ALARM = 25;
public static final int PUMP_UNREACHABLE = 26;
public static final int BG_READINGS_MISSED = 27;
public static final int UNSUPPORTED_FIRMWARE = 28;
public static final int MINIMAL_BASAL_VALUE_REPLACED = 29;
public static final int BASAL_PROFILE_NOT_ALIGNED_TO_HOURS = 30;
public static final int ZERO_VALUE_IN_PROFILE = 31;
public static final int PROFILE_SWITCH_MISSING = 32;
public static final int NOT_ENG_MODE_OR_RELEASE = 33;
public static final int WRONG_PUMP_PASSWORD = 34;
public static final int PERMISSION_STORAGE = 35;
public static final int PERMISSION_LOCATION = 36;
public static final int PERMISSION_BATTERY = 37;
public static final int PERMISSION_SMS = 38;
public static final int MAXIMUM_BASAL_VALUE_REPLACED = 39;
public static final int NSMALFUNCTION = 40;
public static final int NEWVERSIONDETECTED = 41;
public static final int SENDLOGFILES = 42;
public static final int DEVICENOTPAIRED = 43;
public static final int MEDTRONIC_PUMP_ALARM = 44;
public static final int RILEYLINK_CONNECTION = 45;
public static final int PERMISSION_PHONESTATE = 46;
public static final int INSIGHT_DATE_TIME_UPDATED = 47;
public static final int INSIGHT_TIMEOUT_DURING_HANDSHAKE = 48;
public static final int DST_LOOP_DISABLED = 49;
public static final int DST_IN_24H = 50;
public static final int DISKFULL = 51;
public static final int OLDVERSION = 52;
public static final int OVER_24H_TIME_CHANGE_REQUESTED = 54;
public static final int INVALID_VERSION = 55;
public static final int PERMISSION_SYSTEM_WINDOW = 56;
public static final int OMNIPOD_PUMP_ALARM = 57;
public static final int TIME_OR_TIMEZONE_CHANGE = 58;
public static final int OMNIPOD_POD_NOT_ATTACHED = 59;
public static final int CARBS_REQUIRED = 60;
public static final int IMPORTANCE_HIGH = 2;
public static final String CATEGORY_ALARM = "alarm";
public static final int USERMESSAGE = 1000;
public int id;
public long date;
public String text;
public int level;
public long validTo = 0;
public Integer soundId = null;
protected Runnable action = null;
protected int buttonText = 0;
public Notification() {
}
public Notification(int id, long date, String text, int level, long validTo) {
this.id = id;
this.date = date;
this.text = text;
this.level = level;
this.validTo = validTo;
}
public Notification(int id, String text, int level, int validMinutes) {
this.id = id;
this.date = System.currentTimeMillis();
this.text = text;
this.level = level;
this.validTo = System.currentTimeMillis() + T.mins(validMinutes).msecs();
}
public Notification(int id, @NonNull String text, int level) {
this.id = id;
this.date = System.currentTimeMillis();
this.text = text;
this.level = level;
}
public Notification(int id) {
this.id = id;
this.date = System.currentTimeMillis();
}
public Notification text(String text) {
this.text = text;
return this;
}
public Notification level(int level) {
this.level = level;
return this;
}
public Notification sound(int soundId) {
this.soundId = soundId;
return this;
}
}
| 1 | 32,545 | Just a small ordering thing: Could you please bring `IMPORTANCE_HIGH` to the bottom and maybe even have one line between it and the Notification IDs? | MilosKozak-AndroidAPS | java |
@@ -48,7 +48,7 @@ void prepareMolForDrawing(RWMol &mol, bool kekulize, bool addChiralHs,
if (kekulize) {
try {
MolOps::Kekulize(mol, false); // kekulize, but keep the aromatic flags!
- } catch(const RDKit::AtomKekulizeException &e) {
+ } catch (const RDKit::AtomKekulizeException &e) {
std::cerr << e.what() << std::endl;
}
} | 1 | //
// Copyright (C) 2016-2019 Greg Landrum
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <GraphMol/MolDraw2D/MolDraw2D.h>
#include <GraphMol/MolDraw2D/MolDraw2DUtils.h>
#include <GraphMol/RWMol.h>
#include <GraphMol/MolOps.h>
#include <GraphMol/Depictor/RDDepictor.h>
#include <GraphMol/FileParsers/MolFileStereochem.h>
#include <RDGeneral/BoostStartInclude.h>
#include <boost/foreach.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <RDGeneral/BoostEndInclude.h>
#include <limits>
#include <cmath>
#include <Numerics/Conrec.h>
namespace RDKit {
namespace MolDraw2DUtils {
namespace {
bool isAtomCandForChiralH(const RWMol &mol, const Atom *atom) {
// conditions for needing a chiral H:
// - stereochem specified
// - in at least two rings
if ((!mol.getRingInfo()->isInitialized() ||
mol.getRingInfo()->numAtomRings(atom->getIdx()) > 1) &&
(atom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CCW ||
atom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CW)) {
return true;
}
return false;
}
} // end of anonymous namespace
void prepareMolForDrawing(RWMol &mol, bool kekulize, bool addChiralHs,
bool wedgeBonds, bool forceCoords) {
if (kekulize) {
try {
MolOps::Kekulize(mol, false); // kekulize, but keep the aromatic flags!
} catch(const RDKit::AtomKekulizeException &e) {
std::cerr << e.what() << std::endl;
}
}
if (addChiralHs) {
std::vector<unsigned int> chiralAts;
for (RWMol::AtomIterator atIt = mol.beginAtoms(); atIt != mol.endAtoms();
++atIt) {
if (isAtomCandForChiralH(mol, *atIt)) {
chiralAts.push_back((*atIt)->getIdx());
}
}
if (chiralAts.size()) {
bool addCoords = false;
if (!forceCoords && mol.getNumConformers()) {
addCoords = true;
}
MolOps::addHs(mol, false, addCoords, &chiralAts);
}
}
if (forceCoords || !mol.getNumConformers()) {
// compute 2D coordinates in a standard orientation:
const bool canonOrient = true;
RDDepict::compute2DCoords(mol, nullptr, canonOrient);
}
if (wedgeBonds) {
WedgeMolBonds(mol, &mol.getConformer());
}
}
void prepareAndDrawMolecule(MolDraw2D &drawer, const ROMol &mol,
const std::string &legend,
const std::vector<int> *highlight_atoms,
const std::vector<int> *highlight_bonds,
const std::map<int, DrawColour> *highlight_atom_map,
const std::map<int, DrawColour> *highlight_bond_map,
const std::map<int, double> *highlight_radii,
int confId) {
RWMol cpy(mol);
prepareMolForDrawing(cpy);
// having done the prepare, we don't want to do it again in drawMolecule.
bool old_prep_mol = drawer.drawOptions().prepareMolsBeforeDrawing;
drawer.drawOptions().prepareMolsBeforeDrawing = false;
drawer.drawMolecule(cpy, legend, highlight_atoms, highlight_bonds,
highlight_atom_map, highlight_bond_map, highlight_radii,
confId);
drawer.drawOptions().prepareMolsBeforeDrawing = old_prep_mol;
}
void updateDrawerParamsFromJSON(MolDraw2D &drawer, const char *json) {
PRECONDITION(json, "no parameter string");
updateDrawerParamsFromJSON(drawer, std::string(json));
};
#define PT_OPT_GET(opt) opts.opt = pt.get(#opt, opts.opt)
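// e.g. PT_OPT_GET(padding) expands to: opts.padding = pt.get("padding", opts.padding)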
void get_colour_option(boost::property_tree::ptree *pt, const char *pnm,
DrawColour &colour) {
PRECONDITION(pnm && strlen(pnm), "bad property name");
if (pt->find(pnm) == pt->not_found()) {
return;
}
boost::property_tree::ptree::const_iterator itm = pt->get_child(pnm).begin();
colour.r = itm->second.get_value<float>();
++itm;
colour.g = itm->second.get_value<float>();
++itm;
colour.b = itm->second.get_value<float>();
++itm;
}
void updateDrawerParamsFromJSON(MolDraw2D &drawer, const std::string &json) {
if (json == "") {
return;
}
std::istringstream ss;
ss.str(json);
MolDrawOptions &opts = drawer.drawOptions();
boost::property_tree::ptree pt;
boost::property_tree::read_json(ss, pt);
PT_OPT_GET(atomLabelDeuteriumTritium);
PT_OPT_GET(dummiesAreAttachments);
PT_OPT_GET(circleAtoms);
PT_OPT_GET(continuousHighlight);
PT_OPT_GET(fillHighlights);
PT_OPT_GET(flagCloseContactsDist);
PT_OPT_GET(includeAtomTags);
PT_OPT_GET(clearBackground);
PT_OPT_GET(legendFontSize);
PT_OPT_GET(maxFontSize);
PT_OPT_GET(minFontSize);
PT_OPT_GET(annotationFontScale);
PT_OPT_GET(fontFile);
PT_OPT_GET(multipleBondOffset);
PT_OPT_GET(padding);
PT_OPT_GET(additionalAtomLabelPadding);
PT_OPT_GET(bondLineWidth);
PT_OPT_GET(highlightBondWidthMultiplier);
PT_OPT_GET(prepareMolsBeforeDrawing);
PT_OPT_GET(fixedScale);
PT_OPT_GET(fixedBondLength);
PT_OPT_GET(rotate);
PT_OPT_GET(addStereoAnnotation);
PT_OPT_GET(atomHighlightsAreCircles);
PT_OPT_GET(centreMoleculesBeforeDrawing);
get_colour_option(&pt, "highlightColour", opts.highlightColour);
get_colour_option(&pt, "backgroundColour", opts.backgroundColour);
get_colour_option(&pt, "legendColour", opts.legendColour);
if (pt.find("atomLabels") != pt.not_found()) {
BOOST_FOREACH (boost::property_tree::ptree::value_type const &item,
pt.get_child("atomLabels")) {
opts.atomLabels[boost::lexical_cast<int>(item.first)] =
item.second.get_value<std::string>();
}
}
}
void contourAndDrawGrid(MolDraw2D &drawer, const double *grid,
const std::vector<double> &xcoords,
const std::vector<double> &ycoords, size_t nContours,
std::vector<double> &levels,
const ContourParams ¶ms,
const ROMol *mol) {
PRECONDITION(grid, "no data");
PRECONDITION(params.colourMap.size() > 1,
"colourMap must have at least two entries");
if (params.setScale) {
Point2D minP = {xcoords[0], ycoords[0]};
Point2D maxP = {xcoords.back(), ycoords.back()};
drawer.setScale(drawer.width(), drawer.height(), minP, maxP, mol);
}
size_t nX = xcoords.size();
size_t nY = ycoords.size();
double minV = std::numeric_limits<double>::max();
double maxV = -std::numeric_limits<double>::max();
if (!levels.size() || params.fillGrid) {
for (size_t i = 0; i < nX; ++i) {
for (size_t j = 0; j < nY; ++j) {
minV = std::min(minV, grid[i * nY + j]);
maxV = std::max(maxV, grid[i * nY + j]);
}
}
if (!levels.size()) {
levels.resize(nContours);
for (size_t i = 0; i < nContours; ++i) {
levels[i] = minV + i * (maxV - minV) / (nContours - 1);
}
}
}
if (maxV <= minV) {
return;
}
const auto olw = drawer.lineWidth();
const auto odash = drawer.dash();
const auto ocolor = drawer.colour();
const auto ofill = drawer.fillPolys();
const auto owidth = drawer.lineWidth();
if (params.fillGrid) {
drawer.setFillPolys(true);
drawer.setLineWidth(1);
auto delta = (maxV - minV);
if (params.colourMap.size() > 2) {
// need to find how fractionally far we are from zero, not the min
if (-minV > maxV) {
delta = -minV;
} else {
delta = maxV;
}
}
for (size_t i = 0; i < nX - 1; ++i) {
for (size_t j = 0; j < nY - 1; ++j) {
auto gridV = grid[i * nY + j];
auto fracV = (gridV - minV) / delta;
if (params.colourMap.size() > 2) {
// need to find how fractionally far we are from zero, not the min
fracV = gridV / delta;
if (fracV < 0) {
fracV *= -1;
}
}
auto c1 = (gridV < 0 || params.colourMap.size() == 2)
? params.colourMap[1]
: params.colourMap[1];
auto c2 = (gridV < 0 || params.colourMap.size() == 2)
? params.colourMap[0]
: params.colourMap[2];
auto c = c1 + (c2 - c1) * fracV;
// don't bother drawing boxes that are the same as the background color:
double tol = 0.01;
if (c.feq(drawer.drawOptions().backgroundColour, tol)) {
continue;
}
drawer.setColour(c);
Point2D p1 = {xcoords[i], ycoords[j]};
Point2D p2 = {xcoords[i + 1], ycoords[j + 1]};
drawer.drawRect(p1, p2);
}
}
}
if (nContours) {
    if (nContours > levels.size()) {
      throw ValueErrorException(
          "nContours larger than the size of the level list");
}
std::vector<conrec::ConrecSegment> segs;
conrec::Contour(grid, 0, nX - 1, 0, nY - 1, xcoords.data(), ycoords.data(),
nContours, levels.data(), segs);
static DashPattern negDash = {2, 6};
static DashPattern posDash;
drawer.setColour(params.contourColour);
drawer.setLineWidth(params.contourWidth);
for (const auto &seg : segs) {
if (params.dashNegative && seg.isoVal < 0) {
drawer.setDash(negDash);
} else {
drawer.setDash(posDash);
}
drawer.drawLine(seg.p1, seg.p2);
}
}
drawer.setDash(odash);
drawer.setLineWidth(olw);
drawer.setColour(ocolor);
drawer.setFillPolys(ofill);
drawer.setLineWidth(owidth);
};
void contourAndDrawGaussians(MolDraw2D &drawer,
const std::vector<Point2D> &locs,
const std::vector<double> &weights,
const std::vector<double> &widths,
size_t nContours, std::vector<double> &levels,
const ContourParams ¶ms,
const ROMol *mol) {
PRECONDITION(locs.size() == weights.size(), "size mismatch");
PRECONDITION(locs.size() == widths.size(), "size mismatch");
// start by setting up the grid
if (params.setScale) {
Point2D minP, maxP;
minP.x = minP.y = std::numeric_limits<double>::max();
maxP.x = maxP.y = -std::numeric_limits<double>::max();
for (const auto &loc : locs) {
minP.x = std::min(loc.x, minP.x);
minP.y = std::min(loc.y, minP.y);
maxP.x = std::max(loc.x, maxP.x);
maxP.y = std::max(loc.y, maxP.y);
}
Point2D dims = maxP - minP;
minP.x -= drawer.drawOptions().padding * dims.x;
minP.y -= drawer.drawOptions().padding * dims.y;
maxP.x += drawer.drawOptions().padding * dims.x;
maxP.y += drawer.drawOptions().padding * dims.y;
if (params.extraGridPadding > 0) {
Point2D p1(0, 0), p2(params.extraGridPadding, 0);
double pad =
fabs(drawer.getDrawCoords(p2).x - drawer.getDrawCoords(p1).x);
minP.x -= pad;
minP.y -= pad;
maxP.x += pad;
maxP.y += pad;
}
drawer.setScale(drawer.width(), drawer.height(), minP, maxP, mol);
}
size_t nx = (size_t)ceil(drawer.range().x / params.gridResolution) + 1;
size_t ny = (size_t)ceil(drawer.range().y / params.gridResolution) + 1;
std::vector<double> xcoords(nx);
for (size_t i = 0; i < nx; ++i) {
xcoords[i] = drawer.minPt().x + i * params.gridResolution;
}
std::vector<double> ycoords(ny);
for (size_t i = 0; i < ny; ++i) {
ycoords[i] = drawer.minPt().y + i * params.gridResolution;
}
std::unique_ptr<double[]> grid(new double[nx * ny]);
// populate the grid from the gaussians:
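  // each grid point accumulates sum_i w_i/s_i * exp(-d_i^2 / (2 s_i^2)), where
  // d_i is the distance to location i, and the total is scaled by 1/(2*pi)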
for (size_t ix = 0; ix < nx; ++ix) {
auto px = drawer.minPt().x + ix * params.gridResolution;
for (size_t iy = 0; iy < ny; ++iy) {
auto py = drawer.minPt().y + iy * params.gridResolution;
Point2D pt(px, py);
double accum = 0.0;
for (size_t ig = 0; ig < locs.size(); ++ig) {
auto d2 = (pt - locs[ig]).lengthSq();
auto contrib = weights[ig] / widths[ig] *
exp(-0.5 * d2 / (widths[ig] * widths[ig]));
accum += contrib;
}
grid[ix * ny + iy] = accum / (2 * M_PI);
}
}
// and render it:
ContourParams paramsCopy = params;
paramsCopy.setScale = false; // if scaling was needed, we did it already
contourAndDrawGrid(drawer, grid.get(), xcoords, ycoords, nContours, levels,
paramsCopy);
};
} // namespace MolDraw2DUtils
} // namespace RDKit
| 1 | 21,525 | Should this be boost logged? | rdkit-rdkit | cpp |
@@ -57,9 +57,10 @@ def find_notifiers(notifier_name):
# pylint: enable=inconsistent-return-statements
-def convert_to_timestamp(violations):
+def convert_to_timestamp(session, violations):
"""Convert violation created_at_datetime to timestamp string.
Args:
+ session (object): session object to work on.
violations (sqlalchemy_object): List of violations as sqlalchemy
row/record object with created_at_datetime.
Returns: | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notifier service."""
import importlib
import inspect
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import findings
from google.cloud.forseti.notifier.notifiers import email_inventory_snapshot_summary as inv_summary
from google.cloud.forseti.notifier.notifiers import email_scanner_summary as scanner_summary
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: enable=line-too-long
LOGGER = logger.get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def find_notifiers(notifier_name):
"""Get the first class in the given sub module
Args:
notifier_name (str): Name of the notifier.
    Returns:
class: The class in the sub module
"""
try:
module = importlib.import_module(
'google.cloud.forseti.notifier.notifiers.{0}'.format(
notifier_name))
for filename in dir(module):
obj = getattr(module, filename)
if inspect.isclass(obj) \
and issubclass(obj, BaseNotification) \
and obj is not BaseNotification:
return obj
except ImportError as e:
LOGGER.error('Can\'t import notifier %s: %s', notifier_name, e.message)
# pylint: enable=inconsistent-return-statements
def convert_to_timestamp(violations):
"""Convert violation created_at_datetime to timestamp string.
Args:
violations (sqlalchemy_object): List of violations as sqlalchemy
row/record object with created_at_datetime.
Returns:
list: List of violations as sqlalchemy row/record object with
created_at_datetime converted to timestamp string.
"""
for violation in violations:
violation.created_at_datetime = (
violation.created_at_datetime.strftime(
string_formats.TIMESTAMP_TIMEZONE))
return violations
def process(message):
"""Process messages about what notifications to send.
Args:
message (dict): Message with payload in dict.
The payload will be different depending on the sender
of the message.
Example:
{'status': 'foobar_done',
'payload': {}}
"""
payload = message.get('payload')
if message.get('status') == 'inventory_done':
inv_email_notifier = inv_summary.EmailInventorySnapshotSummary(
payload.get('sendgrid_api_key')
)
inv_email_notifier.run(
payload.get('cycle_time'),
payload.get('cycle_timestamp'),
payload.get('snapshot_cycle_status'),
payload.get('notifiers'),
payload.get('email_sender'),
payload.get('email_recipient')
)
return
if message.get('status') == 'scanner_done':
scanner_email_notifier = scanner_summary.EmailScannerSummary(
payload.get('sendgrid_api_key')
)
scanner_email_notifier.run(
payload.get('output_csv_name'),
payload.get('output_filename'),
payload.get('now_utc'),
payload.get('all_violations'),
payload.get('resource_counts'),
payload.get('violation_errors'),
payload.get('email_sender'),
payload.get('email_recipient'),
payload.get('email_description'))
return
def run(inventory_index_id, progress_queue, service_config=None):
"""Run the notifier.
Entry point when the notifier is run as a library.
Args:
inventory_index_id (str): Inventory index id.
progress_queue (Queue): The progress queue.
service_config (ServiceConfig): Forseti 2.0 service configs.
Returns:
int: Status code.
"""
# pylint: disable=too-many-locals
global_configs = service_config.get_global_config()
notifier_configs = service_config.get_notifier_config()
if not inventory_index_id:
with service_config.scoped_session() as session:
inventory_index_id = (
DataAccess.get_latest_inventory_index_id(session))
# get violations
violation_access_cls = scanner_dao.define_violation(
service_config.engine)
violation_access = violation_access_cls(service_config.engine)
service_config.violation_access = violation_access
violations = violation_access.list(inventory_index_id)
violations = convert_to_timestamp(violations)
violations_as_dict = []
for violation in violations:
violations_as_dict.append(
scanner_dao.convert_sqlalchemy_object_to_dict(violation))
violations = scanner_dao.map_by_resource(violations_as_dict)
for retrieved_v in violations:
log_message = ('Retrieved {} violations for resource \'{}\''.format(
len(violations[retrieved_v]), retrieved_v))
LOGGER.info(log_message)
progress_queue.put(log_message)
# build notification notifiers
notifiers = []
for resource in notifier_configs['resources']:
if violations.get(resource['resource']) is None:
log_message = 'Resource \'{}\' has no violations'.format(
resource['resource'])
progress_queue.put(log_message)
LOGGER.info(log_message)
continue
if not resource['should_notify']:
LOGGER.debug('Not notifying for: %s', resource['resource'])
continue
for notifier in resource['notifiers']:
log_message = 'Running \'{}\' notifier for resource \'{}\''.format(
notifier['name'], resource['resource'])
progress_queue.put(log_message)
LOGGER.info(log_message)
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(resource['resource'],
inventory_index_id,
violations[resource['resource']],
global_configs,
notifier_configs,
notifier['configuration']))
# Run the notifiers.
for notifier in notifiers:
notifier.run()
if (notifier_configs.get('violation') and
notifier_configs.get('violation').get('findings').get('enabled')):
findings.Findingsnotifier().run(
violations_as_dict,
notifier_configs.get('violation').get('findings').get('gcs_path'))
log_message = 'Notification completed!'
progress_queue.put(log_message)
progress_queue.put(None)
LOGGER.info(log_message)
return 0
| 1 | 29,611 | What is the reason for doing expunge here? This method is purely for converting the timestamp. | forseti-security-forseti-security | py |
@@ -86,9 +86,15 @@ class TypeToSchema extends TypeUtil.SchemaVisitor<Schema> {
List<Schema.Field> fields = Lists.newArrayListWithExpectedSize(fieldSchemas.size());
for (int i = 0; i < structFields.size(); i += 1) {
Types.NestedField structField = structFields.get(i);
+ String origFieldName = structField.name();
+ boolean isValid = validAvroName(origFieldName);
+ String fieldName = isValid ? origFieldName : sanitize(origFieldName);
Schema.Field field = new Schema.Field(
- structField.name(), fieldSchemas.get(i), null,
+ sanitize(fieldName), fieldSchemas.get(i), null,
structField.isOptional() ? JsonProperties.NULL_VALUE : null);
+ if (!isValid) {
+ field.addProp(AvroSchemaUtil.ORIGINAL_FIELD_NAME_PROP, origFieldName);
+ }
field.addProp(AvroSchemaUtil.FIELD_ID_PROP, structField.fieldId());
fields.add(field);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
class TypeToSchema extends TypeUtil.SchemaVisitor<Schema> {
private static final Schema BOOLEAN_SCHEMA = Schema.create(Schema.Type.BOOLEAN);
private static final Schema INTEGER_SCHEMA = Schema.create(Schema.Type.INT);
private static final Schema LONG_SCHEMA = Schema.create(Schema.Type.LONG);
private static final Schema FLOAT_SCHEMA = Schema.create(Schema.Type.FLOAT);
private static final Schema DOUBLE_SCHEMA = Schema.create(Schema.Type.DOUBLE);
private static final Schema DATE_SCHEMA = LogicalTypes.date()
.addToSchema(Schema.create(Schema.Type.INT));
private static final Schema TIME_SCHEMA = LogicalTypes.timeMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
private static final Schema TIMESTAMP_SCHEMA = LogicalTypes.timestampMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
private static final Schema TIMESTAMPTZ_SCHEMA = LogicalTypes.timestampMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
private static final Schema STRING_SCHEMA = Schema.create(Schema.Type.STRING);
private static final Schema UUID_SCHEMA = LogicalTypes.uuid()
.addToSchema(Schema.createFixed("uuid_fixed", null, null, 16));
private static final Schema BINARY_SCHEMA = Schema.create(Schema.Type.BYTES);
static {
TIMESTAMP_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, false);
TIMESTAMPTZ_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, true);
}
private final Map<Type, Schema> results = Maps.newHashMap();
private final Map<Types.StructType, String> names;
TypeToSchema(Map<Types.StructType, String> names) {
this.names = names;
}
Map<Type, Schema> getConversionMap() {
return results;
}
@Override
public Schema schema(org.apache.iceberg.Schema schema, Schema structSchema) {
return structSchema;
}
@Override
public Schema struct(Types.StructType struct, List<Schema> fieldSchemas) {
Schema recordSchema = results.get(struct);
if (recordSchema != null) {
return recordSchema;
}
String recordName = names.get(struct);
if (recordName == null) {
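      // no explicit name was supplied for this struct, so derive one from the
      // field id currently on top of the visitor's field-id stack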
recordName = "r" + fieldIds().peek();
}
List<Types.NestedField> structFields = struct.fields();
List<Schema.Field> fields = Lists.newArrayListWithExpectedSize(fieldSchemas.size());
for (int i = 0; i < structFields.size(); i += 1) {
Types.NestedField structField = structFields.get(i);
Schema.Field field = new Schema.Field(
structField.name(), fieldSchemas.get(i), null,
structField.isOptional() ? JsonProperties.NULL_VALUE : null);
field.addProp(AvroSchemaUtil.FIELD_ID_PROP, structField.fieldId());
fields.add(field);
}
recordSchema = Schema.createRecord(recordName, null, null, false, fields);
results.put(struct, recordSchema);
return recordSchema;
}
@Override
public Schema field(Types.NestedField field, Schema fieldSchema) {
if (field.isOptional()) {
return AvroSchemaUtil.toOption(fieldSchema);
} else {
return fieldSchema;
}
}
@Override
public Schema list(Types.ListType list, Schema elementSchema) {
Schema listSchema = results.get(list);
if (listSchema != null) {
return listSchema;
}
if (list.isElementOptional()) {
listSchema = Schema.createArray(AvroSchemaUtil.toOption(elementSchema));
} else {
listSchema = Schema.createArray(elementSchema);
}
listSchema.addProp(AvroSchemaUtil.ELEMENT_ID_PROP, list.elementId());
results.put(list, listSchema);
return listSchema;
}
@Override
public Schema map(Types.MapType map, Schema keySchema, Schema valueSchema) {
Schema mapSchema = results.get(map);
if (mapSchema != null) {
return mapSchema;
}
if (keySchema.getType() == Schema.Type.STRING) {
// if the map has string keys, use Avro's map type
mapSchema = Schema.createMap(
map.isValueOptional() ? AvroSchemaUtil.toOption(valueSchema) : valueSchema);
mapSchema.addProp(AvroSchemaUtil.KEY_ID_PROP, map.keyId());
mapSchema.addProp(AvroSchemaUtil.VALUE_ID_PROP, map.valueId());
} else {
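      // non-string keys can't use Avro's native map type; presumably this falls
      // back to AvroSchemaUtil's array-of-key/value-records encoding of the map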
mapSchema = AvroSchemaUtil.createMap(map.keyId(), keySchema,
map.valueId(), map.isValueOptional() ? AvroSchemaUtil.toOption(valueSchema) : valueSchema);
}
results.put(map, mapSchema);
return mapSchema;
}
@Override
public Schema primitive(Type.PrimitiveType primitive) {
Schema primitiveSchema;
switch (primitive.typeId()) {
case BOOLEAN:
primitiveSchema = BOOLEAN_SCHEMA;
break;
case INTEGER:
primitiveSchema = INTEGER_SCHEMA;
break;
case LONG:
primitiveSchema = LONG_SCHEMA;
break;
case FLOAT:
primitiveSchema = FLOAT_SCHEMA;
break;
case DOUBLE:
primitiveSchema = DOUBLE_SCHEMA;
break;
case DATE:
primitiveSchema = DATE_SCHEMA;
break;
case TIME:
primitiveSchema = TIME_SCHEMA;
break;
case TIMESTAMP:
if (((Types.TimestampType) primitive).shouldAdjustToUTC()) {
primitiveSchema = TIMESTAMPTZ_SCHEMA;
} else {
primitiveSchema = TIMESTAMP_SCHEMA;
}
break;
case STRING:
primitiveSchema = STRING_SCHEMA;
break;
case UUID:
primitiveSchema = UUID_SCHEMA;
break;
case FIXED:
Types.FixedType fixed = (Types.FixedType) primitive;
primitiveSchema = Schema.createFixed("fixed_" + fixed.length(), null, null, fixed.length());
break;
case BINARY:
primitiveSchema = BINARY_SCHEMA;
break;
case DECIMAL:
Types.DecimalType decimal = (Types.DecimalType) primitive;
primitiveSchema = LogicalTypes.decimal(decimal.precision(), decimal.scale())
.addToSchema(Schema.createFixed(
"decimal_" + decimal.precision() + "_" + decimal.scale(),
null, null, TypeUtil.decimalRequriedBytes(decimal.precision())));
break;
default:
throw new UnsupportedOperationException(
"Unsupported type ID: " + primitive.typeId());
}
results.put(primitive, primitiveSchema);
return primitiveSchema;
}
}
| 1 | 13,898 | This calls sanitize twice if the name isn't valid. | apache-iceberg | java |
@@ -0,0 +1,13 @@
+class Proposal < ActiveRecord::Base
+ has_one :cart
+
+ validates :flow, presence: true, inclusion: {in: ApprovalGroup::FLOWS}
+ validates :status, presence: true, inclusion: {in: Approval::STATUSES}
+
+ after_initialize :set_default_flow
+
+
+ def set_default_flow
+ self.flow ||= 'parallel'
+ end
+end | 1 | 1 | 12,120 | Is there an equivalent that'd allow zero or one? | 18F-C2 | rb |
|
@@ -234,6 +234,13 @@ func (m *taskExecutor) repeatWith() (err error) {
m.resetK8sClient(resource)
}
+ if rwExec.isTaskObjectNameRepeat() {
+ // if repetition is based on task object name itself, then the task's
+ // object name needs to be set
+ m.metaTaskExec.setObjectName(resource)
+ m.objectName = resource
+ }
+
// set the currently active repeat resource
util.SetNestedField(m.templateValues, resource, string(v1alpha1.ListItemsTLP), string(v1alpha1.CurrentRepeatResourceLITP))
| 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package task
import (
"fmt"
"strings"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
m_k8s_client "github.com/openebs/maya/pkg/client/k8s"
m_k8s "github.com/openebs/maya/pkg/k8s"
"github.com/openebs/maya/pkg/template"
"github.com/openebs/maya/pkg/util"
api_apps_v1beta1 "k8s.io/api/apps/v1beta1"
api_core_v1 "k8s.io/api/core/v1"
api_extn_v1beta1 "k8s.io/api/extensions/v1beta1"
)
// TaskExecutor is the interface that provides a contract method to execute
// tasks
type TaskExecutor interface {
Execute() (err error)
}
// TaskOutputExecutor is the interface that provides a contract method to
// generate output in a pre-defined format. The output format is specified in
// the task.
type TaskOutputExecutor interface {
Output() (output []byte, err error)
}
// TODO
// Refactor this to a Kubernetes Custom Resource
//
// RunTask composes various specifications of a task
type RunTask struct {
// Name of this task
Name string
// MetaYml is the specifications about meta information of this run task
MetaYml string
// TaskYml is the specifications about this run task
TaskYml string
// PostRunTemplateFuncs is a set of go template functions that is run
// against the result of this task's execution. In other words, this
// template is run post the task execution.
PostRunTemplateFuncs string
}
type taskExecutor struct {
// identity is the id of the task
identity string
// templateValues will hold the values that will be applied against
// the task's specification (which is a go template) before this task gets
// executed
templateValues map[string]interface{}
// objectName is the name of the object
//
// NOTE:
// object refers to the result of task execution
objectName string
// taskResultQueries is a set of queries that are run
// against the object after successful execution of the task
taskResultQueries []TaskResultQuery
// taskPatch is a set of patches that get applied against
// the task object
taskPatch TaskPatch
// metaTaskExec is the instance to be used to execute meta
// operations on this task
metaTaskExec *metaTaskExecutor
// runtask is the specifications that determine a task & operations associated
// with it
runtask RunTask
// k8sClient will be used to make K8s API calls
k8sClient *m_k8s_client.K8sClient
}
// newK8sClient returns a new instance of K8sClient based on the provided run
// namespace.
//
// NOTE:
// Providing a run namespace can be optional. It is optional for cluster wide
// operations.
//
// NOTE:
// In cases where more than one namespaces are involved, **repeatWith**
// metatask property is used.
func newK8sClient(runNamespace string) (kc *m_k8s_client.K8sClient, err error) {
ns := strings.TrimSpace(runNamespace)
kc, err = m_k8s_client.NewK8sClient(ns)
return
}
// resetK8sClient returns a new instance of taskExecutor pointing to a new
// namespace
func (m *taskExecutor) resetK8sClient(namespace string) (*taskExecutor, error) {
// client to make K8s API calls using the provided namespace
kc, err := newK8sClient(namespace)
if err != nil {
return nil, err
}
// reset the k8s client
m.k8sClient = kc
return m, nil
}
// newTaskExecutor returns a new instance of taskExecutor
func newTaskExecutor(runtask RunTask, values map[string]interface{}) (*taskExecutor, error) {
mte, err := newMetaTaskExecutor(runtask.MetaYml, values)
if err != nil {
return nil, err
}
// client to make K8s API calls using the namespace on
// which this task is supposed to be executed
kc, err := newK8sClient(mte.getRunNamespace())
if err != nil {
return nil, err
}
return &taskExecutor{
templateValues: values,
identity: mte.getIdentity(),
objectName: mte.getObjectName(),
taskResultQueries: mte.getTaskResultQueries(),
metaTaskExec: mte,
runtask: runtask,
k8sClient: kc,
}, nil
}
// String provides the essential task executor details
func (m *taskExecutor) String() string {
return fmt.Sprintf("task with identity '%s' and with objectname '%s'", m.identity, m.objectName)
}
// getTaskIdentity gets the meta task executor value
func (m *taskExecutor) getTaskIdentity() string {
return m.identity
}
// Output returns the result of templating this task's yaml
//
// This implements TaskOutputExecutor interface
func (m *taskExecutor) Output() (output []byte, err error) {
output, err = template.AsTemplatedBytes("Output", m.runtask.TaskYml, m.templateValues)
return
}
// getTaskResultNotFoundError fetches the NotFound error if any from this
// runtask's template values
//
// NOTE:
// Logic to determine NotFound error is set at PostRunTemplateFuncs & is
// executed during post task execution phase. NotFound error is set if specified
// items or properties are not found. This error is set in the runtask's
// template values.
//
// NOTE:
// Below property is set with verification error if any:
// .TaskResult.<taskID>.notFoundErr
func (m *taskExecutor) getTaskResultNotFoundError() interface{} {
return util.GetNestedField(m.templateValues, string(v1alpha1.TaskResultTLP), m.identity, string(v1alpha1.TaskResultNotFoundErrTRTP))
}
// getTaskResultVerifyError fetches the verification error if any from this
// runtask's template values
//
// NOTE:
// Logic to determine Verify error is set at PostRunTemplateFuncs & is
// executed during post task execution phase. Verify error is set if specified
// verifications fail. This error is set in the runtask's template values.
//
// NOTE:
// Below property is set with verification error if any:
// .TaskResult.<taskID>.verifyErr
func (m *taskExecutor) getTaskResultVerifyError() interface{} {
return util.GetNestedField(m.templateValues, string(v1alpha1.TaskResultTLP), m.identity, string(v1alpha1.TaskResultVerifyErrTRTP))
}
// resetTaskResultVerifyError resets the verification error from this runtask's
// template values
//
// NOTE:
// reset here implies setting the verification err's placeholder value to nil
//
// Below property is reset with `nil`:
// .TaskResult.<taskID>.verifyErr
//
// NOTE:
// Verification error is set during the post task execution phase if there are
// any verification error. This error is set in the runtask's template values.
func (m *taskExecutor) resetTaskResultVerifyError() {
util.SetNestedField(m.templateValues, nil, string(v1alpha1.TaskResultTLP), m.identity, string(v1alpha1.TaskResultVerifyErrTRTP))
}
// repeatWith repeats execution of the task based on the repeatWith property
// set in meta task specifications. The same task is executed repeatedly based
// on the resource names set against the repeatWith property.
//
// NOTE:
// Each task execution depends on the currently active repeat resource.
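// As an illustration (resource names made up, not from the original spec): if
// the repeat resources resolve to the namespaces "default" and "openebs", the
// task below is executed twice, once with the k8s client re-pointed at each
// of those namespaces.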
func (m *taskExecutor) repeatWith() (err error) {
rwExec := m.metaTaskExec.getRepeatWithResourceExecutor()
if !rwExec.isRepeat() {
// no need to repeat if this task is not meant to be repeated;
// so execute once & return
err = m.retryOnVerificationError()
return
}
// execute the task function based on the repeat resources
for _, resource := range rwExec.getResources() {
if rwExec.isNamespaceRepeat() {
// if repetition is based on namespace, then the k8s client needs to
// point to proper namespace before executing the task
m.resetK8sClient(resource)
}
// set the currently active repeat resource
util.SetNestedField(m.templateValues, resource, string(v1alpha1.ListItemsTLP), string(v1alpha1.CurrentRepeatResourceLITP))
// execute the task function
err = m.retryOnVerificationError()
if err != nil {
// stop repetition on unhandled runtime error & return
return
}
}
return
}
// retryOnVerificationError retries execution of the task if the task execution
// resulted into verification error. The number of retry attempts & interval
// between each attempt is specified in the task's meta specification.
func (m *taskExecutor) retryOnVerificationError() (err error) {
retryAttempts, interval := m.metaTaskExec.getRetry()
// original invocation as well as all retry attempts
// i == 0 implies original task execute invocation
// i > 0 implies a retry operation
for i := 0; i <= retryAttempts; i++ {
// first reset the previous verify error if any
m.resetTaskResultVerifyError()
// execute the task function
err = m.ExecuteIt()
if err != nil {
// break this retry execution loop if there were any runtime errors
return
}
// check for VerifyError if any
//
// NOTE:
// VerifyError is a handled runtime error which is handled via templating
//
// NOTE:
// retry is done only if VerifyError is thrown during post task
// execution
verifyErr := m.getTaskResultVerifyError()
if verifyErr == nil {
// no need to retry if task execution was a success & there was no
// verification error found with the task result
return
}
// current verify error
err, _ = verifyErr.(*template.VerifyError)
if i != retryAttempts {
glog.Warningf("verify error was found during post runtask operations '%s': error '%#v': will retry task execution'%d'", m.identity, err, i+1)
// will retry after the specified interval
time.Sleep(interval)
}
}
// return after exhausting the original invocation and all retries;
// verification error of the final attempt will be returned here
return
}
// Execute executes a runtask by following the directives specified in the
// runtask's meta specifications and other conditions like presence of VerifyErr
func (m *taskExecutor) Execute() (err error) {
return m.repeatWith()
}
// postExecuteIt executes a go template against the provided template values.
// This is run after executing a task.
//
// NOTE:
// This go template is a set of template functions that queries specified
// properties from the result due to the task's execution & storing it at
// placeholders within the **template values**. This is done to query these
// extracted values while executing later runtasks by providing these runtasks
// with the updated **template values**.
func (m *taskExecutor) postExecuteIt() (err error) {
if len(m.runtask.PostRunTemplateFuncs) == 0 {
// nothing needs to be done
return
}
// post runtask operation
_, err = template.AsTemplatedBytes("PostRunTemplateFuncs", m.runtask.PostRunTemplateFuncs, m.templateValues)
if err != nil {
return
}
// NotFound error is a handled runtime error. It is thrown during go template
// execution & set is in the template values. This needs to checked and thrown
// as an error.
notFoundErr := m.getTaskResultNotFoundError()
if notFoundErr != nil {
glog.Warningf("notfound error during post runtask operations '%s': error '%#v'", m.identity, notFoundErr)
err, _ = notFoundErr.(*template.NotFoundError)
}
return
}
// ExecuteIt will execute the runtask based on its meta specs & task specs
func (m *taskExecutor) ExecuteIt() (err error) {
if m.k8sClient == nil {
emsg := "failed to execute task: nil k8s client: verify if run namespace was available"
glog.Errorf(fmt.Sprintf("%s: metatask '%#v'", emsg, m.metaTaskExec.getMetaInfo()))
err = fmt.Errorf("%s: task '%s'", emsg, m.getTaskIdentity())
return
}
if m.metaTaskExec.isPutExtnV1B1Deploy() {
err = m.putExtnV1B1Deploy()
} else if m.metaTaskExec.isPutAppsV1B1Deploy() {
err = m.putAppsV1B1Deploy()
} else if m.metaTaskExec.isPatchExtnV1B1Deploy() {
err = m.patchExtnV1B1Deploy()
} else if m.metaTaskExec.isPatchAppsV1B1Deploy() {
err = m.patchAppsV1B1Deploy()
} else if m.metaTaskExec.isPutCoreV1Service() {
err = m.putCoreV1Service()
} else if m.metaTaskExec.isDeleteExtnV1B1Deploy() {
err = m.deleteExtnV1B1Deployment()
} else if m.metaTaskExec.isDeleteAppsV1B1Deploy() {
err = m.deleteAppsV1B1Deployment()
} else if m.metaTaskExec.isDeleteCoreV1Service() {
err = m.deleteCoreV1Service()
} else if m.metaTaskExec.isGetOEV1alpha1SP() {
err = m.getOEV1alpha1SP()
} else if m.metaTaskExec.isGetCoreV1PVC() {
err = m.getCoreV1PVC()
} else if m.metaTaskExec.isList() {
err = m.listK8sResources()
} else {
err = fmt.Errorf("failed to execute task: not a supported operation: meta info '%#v'", m.metaTaskExec.getMetaInfo())
}
if err != nil {
return
}
// run the post operations after a runtask is executed
return m.postExecuteIt()
}
// asRollbackInstance will provide the rollback instance w.r.t this task's instance
func (m *taskExecutor) asRollbackInstance(objectName string) (*taskExecutor, error) {
mte, willRollback, err := m.metaTaskExec.asRollbackInstance(objectName)
if err != nil {
return nil, err
}
if !willRollback {
// no need of rollback
return nil, nil
}
kc, err := m_k8s_client.NewK8sClient(mte.getRunNamespace())
if err != nil {
return nil, err
}
// Only the meta info is required for a rollback. In
// other words no need of task yaml template & values
return &taskExecutor{
identity: m.identity,
objectName: mte.getObjectName(),
metaTaskExec: mte,
k8sClient: kc,
}, nil
}
// asAppsV1B1Deploy generates a K8s Deployment object
// out of the embedded yaml
func (m *taskExecutor) asAppsV1B1Deploy() (*api_apps_v1beta1.Deployment, error) {
d, err := m_k8s.NewDeploymentYml("AppsV1B1Deploy", m.runtask.TaskYml, m.templateValues)
if err != nil {
return nil, err
}
return d.AsAppsV1B1Deployment()
}
// asExtnV1B1Deploy generates a K8s Deployment object
// out of the embedded yaml
func (m *taskExecutor) asExtnV1B1Deploy() (*api_extn_v1beta1.Deployment, error) {
d, err := m_k8s.NewDeploymentYml("ExtnV1B11Deploy", m.runtask.TaskYml, m.templateValues)
if err != nil {
return nil, err
}
return d.AsExtnV1B1Deployment()
}
// asCoreV1Svc generates a K8s Service object
// out of the embedded yaml
func (m *taskExecutor) asCoreV1Svc() (*api_core_v1.Service, error) {
s, err := m_k8s.NewServiceYml("CoreV1Svc", m.runtask.TaskYml, m.templateValues)
if err != nil {
return nil, err
}
return s.AsCoreV1Service()
}
// putAppsV1B1Deploy will put (i.e. apply to a kubernetes cluster) a Deployment
// object. The Deployment specs is configured in the RunTask.
func (m *taskExecutor) putAppsV1B1Deploy() (err error) {
d, err := m.asAppsV1B1Deploy()
if err != nil {
return
}
deploy, err := m.k8sClient.CreateAppsV1B1DeploymentAsRaw(d)
if err != nil {
return
}
util.SetNestedField(m.templateValues, deploy, string(v1alpha1.CurrentJsonResultTLP))
return
}
// putExtnV1B1Deploy will put (i.e. apply to kubernetes cluster) a Deployment
// whose specifications are defined in the RunTask
func (m *taskExecutor) putExtnV1B1Deploy() (err error) {
d, err := m.asExtnV1B1Deploy()
if err != nil {
return
}
deploy, err := m.k8sClient.CreateExtnV1B1DeploymentAsRaw(d)
if err != nil {
return
}
util.SetNestedField(m.templateValues, deploy, string(v1alpha1.CurrentJsonResultTLP))
return
}
// patchAppsV1B1Deploy will patch a Deployment object in a kubernetes cluster.
// The patch specifications as configured in the RunTask
func (m *taskExecutor) patchAppsV1B1Deploy() (err error) {
err = fmt.Errorf("patchAppsV1B1Deploy is not implemented")
return
}
// patchExtnV1B1Deploy will patch a Deployment where the patch specifications
// are configured in the RunTask
func (m *taskExecutor) patchExtnV1B1Deploy() (err error) {
patch, err := asTaskPatch("ExtnV1B1DeployPatch", m.runtask.TaskYml, m.templateValues)
if err != nil {
return
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return
}
raw, err := pe.toJson()
if err != nil {
return
}
// patch the deployment
deploy, err := m.k8sClient.PatchExtnV1B1DeploymentAsRaw(m.objectName, pe.patchType(), raw)
if err != nil {
return
}
util.SetNestedField(m.templateValues, deploy, string(v1alpha1.CurrentJsonResultTLP))
return
}
// deleteAppsV1B1Deployment will delete one or more Deployments as specified in
// the RunTask
func (m *taskExecutor) deleteAppsV1B1Deployment() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.objectName), ",")
for _, name := range objectNames {
err = m.k8sClient.DeleteAppsV1B1Deployment(name)
if err != nil {
return
}
}
return
}
// deleteExtnV1B1Deployment will delete one or more Deployments as specified in
// the RunTask
func (m *taskExecutor) deleteExtnV1B1Deployment() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.objectName), ",")
for _, name := range objectNames {
err = m.k8sClient.DeleteExtnV1B1Deployment(name)
if err != nil {
return
}
}
return
}
// putCoreV1Service will put a Service whose specs are configured in the RunTask
func (m *taskExecutor) putCoreV1Service() (err error) {
s, err := m.asCoreV1Svc()
if err != nil {
return
}
svc, err := m.k8sClient.CreateCoreV1ServiceAsRaw(s)
if err != nil {
return
}
util.SetNestedField(m.templateValues, svc, string(v1alpha1.CurrentJsonResultTLP))
return
}
// deleteCoreV1Service will delete one or more services as specified in
// the RunTask
func (m *taskExecutor) deleteCoreV1Service() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.objectName), ",")
for _, name := range objectNames {
err = m.k8sClient.DeleteCoreV1Service(name)
if err != nil {
return
}
}
return
}
// getOEV1alpha1SP will get the StoragePool as specified in the RunTask
func (m *taskExecutor) getOEV1alpha1SP() (err error) {
sp, err := m.k8sClient.GetOEV1alpha1SPAsRaw(m.objectName)
if err != nil {
return
}
util.SetNestedField(m.templateValues, sp, string(v1alpha1.CurrentJsonResultTLP))
return
}
// getExtnV1B1Deployment will get the Deployment as specified in the RunTask
func (m *taskExecutor) getExtnV1B1Deployment() (err error) {
deploy, err := m.k8sClient.GetExtnV1B1DeploymentAsRaw(m.objectName)
if err != nil {
return
}
util.SetNestedField(m.templateValues, deploy, string(v1alpha1.CurrentJsonResultTLP))
return
}
// getAppsV1B1Deployment will get the Deployment as specified in the RunTask
func (m *taskExecutor) getAppsV1B1Deployment() (err error) {
deploy, err := m.k8sClient.GetAppsV1B1DeploymentAsRaw(m.objectName)
if err != nil {
return
}
util.SetNestedField(m.templateValues, deploy, string(v1alpha1.CurrentJsonResultTLP))
return
}
// getCoreV1PVC will get the PVC as specified in the RunTask
func (m *taskExecutor) getCoreV1PVC() (err error) {
pvc, err := m.k8sClient.GetCoreV1PVCAsRaw(m.objectName)
if err != nil {
return
}
util.SetNestedField(m.templateValues, pvc, string(v1alpha1.CurrentJsonResultTLP))
return
}
// listK8sResources will list resources as specified in the RunTask
func (m *taskExecutor) listK8sResources() (err error) {
opts, err := m.metaTaskExec.getListOptions()
if err != nil {
return
}
var op []byte
if m.metaTaskExec.isListCoreV1Pod() {
op, err = m.k8sClient.ListCoreV1PodAsRaw(opts)
} else if m.metaTaskExec.isListCoreV1Service() {
op, err = m.k8sClient.ListCoreV1ServiceAsRaw(opts)
} else if m.metaTaskExec.isListExtnV1B1Deploy() {
op, err = m.k8sClient.ListExtnV1B1DeploymentAsRaw(opts)
} else if m.metaTaskExec.isListAppsV1B1Deploy() {
op, err = m.k8sClient.ListAppsV1B1DeploymentAsRaw(opts)
} else if m.metaTaskExec.isListCoreV1PVC() {
op, err = m.k8sClient.ListCoreV1PVCAsRaw(opts)
} else {
err = fmt.Errorf("failed to list k8s resources: meta task not supported: task details '%#v'", m.metaTaskExec.getTaskIdentity())
}
if err != nil {
return
}
// set the json doc result
util.SetNestedField(m.templateValues, op, string(v1alpha1.CurrentJsonResultTLP))
return
}
| 1 | 8,698 | Is it possible to update/re-evaluate entire metatask object for every repeat? If we decide/need to use a repeatWith resources in let us say labelSelector(bad example) then we would have to add another logic to update those properties. | openebs-maya | go |
@@ -47,7 +47,7 @@ func accountImport(args []string) string {
}
wallet := cfg.Wallet
fmt.Printf("#%s: Enter your private key, which will not be exposed on the screen.\n", name)
- privateKeyBytes, err := terminal.ReadPassword(syscall.Stdin)
+ privateKeyBytes, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.L().Error("fail to get private key", zap.Error(err))
return err.Error() | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package account
import (
"fmt"
"io/ioutil"
"strings"
"syscall"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/config"
"github.com/iotexproject/iotex-core/cli/ioctl/validator"
"github.com/iotexproject/iotex-core/pkg/log"
)
// accountImportCmd represents the account create command
var accountImportCmd = &cobra.Command{
Use: "import NAME",
Short: "import IoTeX private key into wallet",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(accountImport(args))
},
}
func accountImport(args []string) string {
// Validate inputs
if err := validator.ValidateName(args[0]); err != nil {
return err.Error()
}
name := args[0]
cfg, err := config.LoadConfig()
if err != nil {
return err.Error()
}
if _, ok := cfg.AccountList[name]; ok {
return fmt.Sprintf("A account named \"%s\" already exists.", name)
}
wallet := cfg.Wallet
fmt.Printf("#%s: Enter your private key, which will not be exposed on the screen.\n", name)
privateKeyBytes, err := terminal.ReadPassword(syscall.Stdin)
if err != nil {
log.L().Error("fail to get private key", zap.Error(err))
return err.Error()
}
privateKey := strings.TrimSpace(string(privateKeyBytes))
addr, err := newAccountByKey(name, privateKey, wallet)
if err != nil {
return err.Error()
}
cfg.AccountList[name] = addr
out, err := yaml.Marshal(&cfg)
if err != nil {
return err.Error()
}
if err := ioutil.WriteFile(config.DefaultConfigFile, out, 0600); err != nil {
return fmt.Sprintf("Failed to write to config file %s.", config.DefaultConfigFile)
}
return fmt.Sprintf(
"New account \"%s\" is created. Keep your password, or your will lose your private key.",
name,
)
}
| 1 | 16,474 | unnecessary conversion (from `unconvert`) | iotexproject-iotex-core | go |
@@ -30,7 +30,6 @@ int main (int argc, char *argv[])
int universe_size;
char *kvsname;
char *val;
- char pmi_fd[16];
char pmi_rank[16];
char pmi_size[16];
int result; | 1 | /************************************************************\
* Copyright 2019 Lawrence Livermore National Security, LLC
* (c.f. AUTHORS, NOTICE.LLNS, COPYING)
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* SPDX-License-Identifier: LGPL-3.0
\************************************************************/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include "src/common/libutil/oom.h"
#include "src/common/libutil/xzmalloc.h"
#include "src/common/libpmi/simple_client.h"
#include "src/common/libpmi/dgetline.h"
#include "src/common/libpmi/pmi.h"
#include "src/common/libflux/reactor.h"
#include "src/common/libtap/tap.h"
#include "server_thread.h"
int main (int argc, char *argv[])
{
struct pmi_server_context *srv;
int cfd[1];
int universe_size;
char *kvsname;
char *val;
char pmi_fd[16];
char pmi_rank[16];
char pmi_size[16];
int result;
int spawned;
int initialized;
int size;
int appnum;
int rank;
int kvsname_max;
int keylen_max;
int vallen_max;
int n;
char buf[64];
int clique_size;
int clique_ranks[1];
plan (NO_PLAN);
srv = pmi_server_create (cfd, 1);
snprintf (pmi_fd, sizeof (pmi_fd), "%d", cfd[0]);
snprintf (pmi_rank, sizeof (pmi_rank), "%d", 0);
snprintf (pmi_size, sizeof (pmi_size), "%d", 1);
setenv ("PMI_FD", pmi_fd, 1);
setenv ("PMI_RANK", pmi_rank, 1);
setenv ("PMI_SIZE", pmi_size, 1);
setenv ("PMI_DEBUG", "1", 1);
setenv ("PMI_SPAWNED", "0", 1);
/* Elicit PMI_ERR_INIT error by calling functions before PMI_Init()
*/
result = PMI_Initialized (&initialized);
ok (result == PMI_SUCCESS && initialized == 0,
"PMI_Initialized() works and set initialized=0");
result = PMI_Finalize ();
ok (result == PMI_ERR_INIT,
"PMI_Finalize before init fails with PMI_ERR_INIT");
result = PMI_Get_size (&size);
ok (result == PMI_ERR_INIT,
"PMI_Get_size before init fails with PMI_ERR_INIT");
result = PMI_Get_rank (&rank);
ok (result == PMI_ERR_INIT,
"PMI_Get_rank before init fails with PMI_ERR_INIT");
result = PMI_Get_universe_size (&universe_size);
ok (result == PMI_ERR_INIT,
"PMI_Get_universe_size before init fails with PMI_ERR_INIT");
result = PMI_Get_appnum (&appnum);
ok (result == PMI_ERR_INIT,
"PMI_Get_appnum before init fails with PMI_ERR_INIT");
result = PMI_KVS_Get_name_length_max (&kvsname_max);
ok (result == PMI_ERR_INIT,
"PMI_KVS_Get_name_length_max before init fails with PMI_ERR_INIT");
result = PMI_KVS_Get_key_length_max (&keylen_max);
ok (result == PMI_ERR_INIT,
"PMI_KVS_Get_key_length_max before init fails with PMI_ERR_INIT");
result = PMI_KVS_Get_value_length_max (&vallen_max);
ok (result == PMI_ERR_INIT,
"PMI_KVS_Get_value_length_max before init fails with PMI_ERR_INIT");
result = PMI_KVS_Get_my_name (buf, sizeof (buf));
ok (result == PMI_ERR_INIT,
"PMI_KVS_Get_my_name before init fails with PMI_ERR_INIT");
result = PMI_KVS_Put ("foo", "bar", "baz");
ok (result == PMI_ERR_INIT,
"PMI_KVS_Put before init fails with PMI_ERR_INIT");
result = PMI_KVS_Commit ("foo");
ok (result == PMI_ERR_INIT,
"PMI_KVS_Commit before init fails with PMI_ERR_INIT");
result = PMI_Barrier ();
ok (result == PMI_ERR_INIT,
"PMI_Barrier before init fails with PMI_ERR_INIT");
result = PMI_KVS_Get ("foo", "bar", buf, sizeof (buf));
ok (result == PMI_ERR_INIT,
"PMI_KVS_Get before init fails with PMI_ERR_INIT");
result = PMI_Get_clique_size (&clique_size);
ok (result == PMI_ERR_INIT,
"PMI_Get_clique_size before init fails with PMI_ERR_INIT");
result = PMI_Get_clique_ranks (clique_ranks, 1);
ok (result == PMI_ERR_INIT,
"PMI_Get_clique_ranks before init fails with PMI_ERR_INIT");
/* Initialize
*/
result = PMI_Init (&spawned);
ok (result == PMI_SUCCESS && spawned == 0,
"PMI_Init works and set spawned=0");
result = PMI_Initialized (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Initialized with NULL arg fails with PMI_ERR_INVALID_ARG");
result = PMI_Initialized (&initialized);
ok (result == PMI_SUCCESS && initialized == 1,
"PMI_Initialized works and set initialized=1");
/* second init */
result = PMI_Init (&spawned);
ok (result == PMI_ERR_INIT,
"Second PMI_Init fails with PMI_ERR_INIT");
/* retrieve basic params
*/
result = PMI_Get_size (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_size with NULL arg fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_size (&size);
ok (result == PMI_SUCCESS && size == 1,
"PMI_Get_size works and set size=1");
result = PMI_Get_rank (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_rank with NULL arg fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_rank (&rank);
ok (result == PMI_SUCCESS && rank == 0,
"PMI_Get_rank works and set rank=0");
result = PMI_Get_universe_size (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_universe_size with NULL arg fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_universe_size (&universe_size);
ok (result == PMI_SUCCESS && universe_size == 1,
"PMI_Get_universe_size works and set universe_size=1");
result = PMI_Get_appnum (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_appnum with NULL arg fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_appnum (&appnum);
ok (result == PMI_SUCCESS && appnum == 42,
"PMI_Get_appnum works and set appnum=42");
/* retrieve maxes
*/
result = PMI_KVS_Get_name_length_max (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get_name_length_max len=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get_name_length_max (&kvsname_max);
ok (result == PMI_SUCCESS && kvsname_max > 0,
"PMI_KVS_Get_KVS_Get_name_length_max works and returned value > 0");
result = PMI_KVS_Get_key_length_max (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get_key_length_max len=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get_key_length_max (&keylen_max);
ok (result == PMI_SUCCESS && keylen_max > 0,
"PMI_KVS_Get_KVS_Get_key_length_max works and returned value > 0");
result = PMI_KVS_Get_value_length_max (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get_value_length_max len=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get_value_length_max (&vallen_max);
ok (result == PMI_SUCCESS && vallen_max > 0,
"PMI_Get_KVS_Get_value_length_max works and returned value > 0");
val = xzmalloc (vallen_max);
/* get kvsname
*/
kvsname = xzmalloc (kvsname_max);
result = PMI_KVS_Get_my_name (NULL, kvsname_max);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get_my_name kvsname=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get_my_name (kvsname, -1);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get_my_name len=-1 fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get_my_name (kvsname, kvsname_max);
ok (result == PMI_SUCCESS,
"PMI_Get_KVS_Get_my_name works");
diag ("kvsname=%s", kvsname);
    /* put foo=bar / commit / barrier / get foo
*/
result = PMI_KVS_Put (NULL, "foo", "bar");
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Put kvsname=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Put (kvsname, NULL, "bar");
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Put key=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Put (kvsname, "foo", NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Put val=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Put (kvsname, "foo", "bar");
ok (result == PMI_SUCCESS,
"PMI_KVS_Put works");
result = PMI_KVS_Commit (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Commit kvsname=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Commit (kvsname);
ok (result == PMI_SUCCESS,
"PMI_KVS_Commit works");
result = PMI_Barrier ();
ok (result == PMI_SUCCESS,
"PMI_Barrier works");
result = PMI_KVS_Get (NULL, "foo", val, vallen_max);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get kvsname=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get (kvsname, NULL, val, vallen_max);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get key=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get (kvsname, "foo", NULL, vallen_max);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get val=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get (kvsname, "foo", val, -1);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_KVS_Get length=-1 fails with PMI_ERR_INVALID_ARG");
result = PMI_KVS_Get (kvsname, "foo", val, vallen_max);
ok (result == PMI_SUCCESS && !strcmp (val, "bar"),
"PMI_KVS_Get works and got expected value");
/* clique
*/
result = PMI_Get_clique_size (NULL);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_clique_size size=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_clique_ranks (NULL, 1);
ok (result == PMI_ERR_INVALID_ARG,
"PMI_Get_clique_ranks ranks=NULL fails with PMI_ERR_INVALID_ARG");
result = PMI_Get_clique_ranks (clique_ranks, 0);
ok (result == PMI_ERR_INVALID_SIZE,
"PMI_Get_clique_ranks size=0 fails with PMI_ERR_INVALID_SIZE");
result = PMI_Get_clique_size (&clique_size);
ok (result == PMI_SUCCESS && clique_size == 1,
"PMI_Get_clique_size works and set size = 1");
result = PMI_Get_clique_ranks (clique_ranks, 1);
ok (result == PMI_SUCCESS && clique_ranks[0] == 0,
"PMI_Get_clique_ranks works and set ranks[0] = 0");
result = PMI_KVS_Put (kvsname, "PMI_process_mapping", "(vector,(0,1,1))");
ok (result == PMI_SUCCESS,
"successfully stored PMI_process_mapping");
result = PMI_Get_clique_size (&clique_size);
ok (result == PMI_SUCCESS && clique_size == 1,
"PMI_Get_clique_size retrieved expected clique size");
result = PMI_Get_clique_ranks (clique_ranks, 1);
ok (result == PMI_SUCCESS && clique_ranks[0] == 0,
"PMI_Get_clique_ranks retrieved expected clique ranks");
/* not implemented
*/
result = PMI_Publish_name ("foo", "42");
ok (result == PMI_FAIL,
"PMI_Publish_name (unimplemented) returns PMI_FAIL");
result = PMI_Unpublish_name ("foo");
ok (result == PMI_FAIL,
"PMI_Unpublish_name (unimplemented) returns PMI_FAIL");
result = PMI_Lookup_name ("foo", "42");
ok (result == PMI_FAIL,
"PMI_Lookup_name (unimplemented) returns PMI_FAIL");
result = PMI_Spawn_multiple (0, // count
NULL, // cmds
NULL, // argvs
NULL, // maxprocs
NULL, // info_keyval_sizesp
NULL, // info_keyval_vectors
0, // preput_keyval_size
NULL, // preput_keyval_vector
NULL); // errors
ok (result == PMI_FAIL,
"PMI_Spawn_multiple (unimplemented) returns PMI_FAIL");
result = PMI_KVS_Create (buf, sizeof (buf));
ok (result == PMI_FAIL,
"PMI_KVS_Create (unimplemented) resturns PMI_FAIL");
result = PMI_KVS_Destroy ("foo");
ok (result == PMI_FAIL,
"PMI_KVS_Destroy (unimplemented) resturns PMI_FAIL");
result = PMI_KVS_Iter_first ("foo", buf, sizeof (buf), buf, sizeof (buf));
ok (result == PMI_FAIL,
"PMI_KVS_Iter_first (unimplemented) resturns PMI_FAIL");
result = PMI_KVS_Iter_next ("foo", buf, sizeof (buf), buf, sizeof (buf));
ok (result == PMI_FAIL,
"PMI_KVS_Iter_next (unimplemented) resturns PMI_FAIL");
result = PMI_Parse_option (0, NULL, NULL, NULL, NULL);
ok (result == PMI_FAIL,
"PMI_Parse_option (unimplemented) resturns PMI_FAIL");
result = PMI_Args_to_keyval (NULL, NULL, NULL, NULL);
ok (result == PMI_FAIL,
"PMI_Args_to_keyval (unimplemented) resturns PMI_FAIL");
result = PMI_Free_keyvals (NULL, 0);
ok (result == PMI_FAIL,
"PMI_Free_keyvals (unimplemented) resturns PMI_FAIL");
result = PMI_Get_options (NULL, NULL);
ok (result == PMI_FAIL,
"PMI_Get_options (unimplemented) resturns PMI_FAIL");
/* aliases
*/
result = PMI_Get_id_length_max (&n);
ok (result == PMI_SUCCESS && n == kvsname_max,
"PMI_Get_id_lenght_max works and set idlen to kvsname_max");
result = PMI_Get_id (buf, sizeof (buf));
ok (result == PMI_SUCCESS && !strcmp (buf, kvsname),
"PMI_Get_id works and set buf to kvsname");
result = PMI_Get_kvs_domain_id (buf, sizeof (buf));
ok (result == PMI_SUCCESS && !strcmp (buf, kvsname),
"PMI_Get_kvs_domain_id works and set buf to kvsname");
/* finalize
*/
result = PMI_Finalize ();
ok (result == PMI_SUCCESS,
"PMI_Finalize works");
free (kvsname);
free (val);
pmi_server_destroy (srv);
done_testing ();
return 0;
}
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| 1 | 25,839 | typo in commit message `openening` | flux-framework-flux-core | c |
@@ -10,7 +10,10 @@
#include <mpi.h>
-#include <adios2.h>
+#define ADIOS_HAVE_PHDF5 // so hdf5 related items are loaded in ADIOS_CPP.h
+//#include "ADIOS_CPP.h"
+#include "adios2.h"
+#include "adios2/engine/hdf5/HDF5ReaderP.h"
int main(int argc, char *argv[])
{ | 1 | /*
* HDF5Writer.cpp
*
* Created on: March 20, 2017
* Author: Junmin
*/
#include <iostream>
#include <vector>
#include <mpi.h>
#include <adios2.h>
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
const bool adiosDebug = true;
adios::ADIOS adios(MPI_COMM_WORLD, adios::Verbose::INFO, adiosDebug);
// Application variable
const std::size_t intDim1 = 4;
const std::size_t intDim2 = 3;
std::vector<int> myInts = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};
std::vector<double> myDoubles = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
const std::size_t Nx = myDoubles.size();
std::vector<std::complex<float>> myCFloats;
const std::size_t ComplexDataSize = 3;
myCFloats.reserve(ComplexDataSize);
myCFloats.emplace_back(1, 3);
myCFloats.emplace_back(2, 2);
myCFloats.emplace_back(3, 1);
std::vector<std::complex<double>> myCDoubles;
myCDoubles.reserve(ComplexDataSize);
myCDoubles.emplace_back(1.1, -3.3);
myCDoubles.emplace_back(2.1, -2.2);
myCDoubles.emplace_back(3.1, -1.1);
std::vector<std::complex<long double>> myCLongDoubles;
myCLongDoubles.reserve(ComplexDataSize);
myCLongDoubles.emplace_back(1.11, -3.33);
myCLongDoubles.emplace_back(2.11, -2.22);
myCLongDoubles.emplace_back(3.11, -1.11);
std::size_t doubleVCount = Nx / size;
std::size_t complexCount = ComplexDataSize / size;
std::size_t intCountDim1 = intDim1 / size;
std::size_t doubleVOffset = rank * doubleVCount;
std::size_t complexOffset = rank * complexCount;
std::size_t intOffsetDim1 = rank * intCountDim1;
std::size_t intOffsetDim2 = 0;
if ((size > 1) && (rank == size - 1))
{
doubleVCount = Nx - rank * (Nx / size);
complexCount = ComplexDataSize - rank * (ComplexDataSize / size);
intCountDim1 = intDim1 - rank * (intDim1 / size);
}
try
{
// Define variable and local size
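        // Judging by the variable names, each DefineVariable call below takes
        // the local selection count, then the global shape, then this rank's
        // global offset, e.g. for "myDoubles": count {doubleVCount},
        // shape {Nx}, start {doubleVOffset}.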
auto &ioMyInts =
adios.DefineVariable<int>("myInts", {intCountDim1, intDim2}, {4, 3},
{intOffsetDim1, intOffsetDim2});
auto &ioMyDoubles = adios.DefineVariable<double>(
"myDoubles", {doubleVCount}, {Nx}, {doubleVOffset});
auto &ioMyCFloats = adios.DefineVariable<std::complex<float>>(
"myCFloats", {complexCount}, {3}, {complexOffset});
auto &ioMyCDoubles = adios.DefineVariable<std::complex<double>>(
"myCDoubles", {complexCount}, {3}, {complexOffset});
auto &ioMyCLongDoubles =
adios.DefineVariable<std::complex<long double>>(
"myCLongDoubles", {complexCount}, {3}, {complexOffset});
        // Define method for engine creation; it basically just takes
        // straightforward parameters
adios::Method &HDF5Settings = adios.DeclareMethod("w");
HDF5Settings.SetEngine("HDF5Writer");
HDF5Settings.SetParameters("chunck=yes", "collectiveIO=yes");
// HDF5Settings.AddTransport( "Mdtm", "localIP=128.0.0.0.1",
// "remoteIP=128.0.0.0.2", "tolerances=1,2,3" );
// Create engine smart pointer to HDF5 Engine due to polymorphism,
// Open returns a smart pointer to Engine containing the Derived class
// HDF5
auto HDF5Writer = adios.Open("test.h5", "w", HDF5Settings);
if (HDF5Writer == nullptr)
throw std::ios_base::failure(
"ERROR: failed to create HDF5 I/O engine at Open\n");
HDF5Writer->Write(ioMyDoubles, myDoubles.data() +
doubleVOffset); // Base class Engine
// own the Write<T>
// that will call
// overloaded Write
// from Derived
HDF5Writer->Write(ioMyInts,
myInts.data() + (intOffsetDim1 * intDim2 * rank));
HDF5Writer->Write(ioMyCFloats, myCFloats.data() + complexOffset);
HDF5Writer->Write(ioMyCDoubles, myCDoubles.data() + complexOffset);
HDF5Writer->Write(ioMyCLongDoubles,
myCLongDoubles.data() + complexOffset);
HDF5Writer->Close();
}
catch (std::invalid_argument &e)
{
if (rank == 0)
{
std::cout << "Invalid argument exception, STOPPING PROGRAM\n";
std::cout << e.what() << "\n";
}
}
catch (std::ios_base::failure &e)
{
if (rank == 0)
{
std::cout << "System exception, STOPPING PROGRAM\n";
std::cout << e.what() << "\n";
}
}
catch (std::exception &e)
{
if (rank == 0)
{
std::cout << "Exception, STOPPING PROGRAM\n";
std::cout << e.what() << "\n";
}
}
MPI_Finalize();
return 0;
}
| 1 | 11,479 | User-code doesn't see the internal ADIOS headers anymore. This whole include block should just be `#include <adios2.h>` | ornladios-ADIOS2 | cpp |
@@ -20,7 +20,8 @@ class BucketViewTest(BaseWebTest, unittest.TestCase):
def test_buckets_are_global_to_every_users(self):
self.app.patch_json(self.record_url,
- {'permissions': {'read': [Authenticated]}},
+ {'data': {},
+ 'permissions': {'read': [Authenticated]}},
headers=self.headers)
self.app.get(self.record_url, headers=get_user_headers('alice'))
| 1 | from pyramid.security import Authenticated
from .support import (BaseWebTest, unittest, get_user_headers,
MINIMALIST_BUCKET, MINIMALIST_GROUP,
MINIMALIST_COLLECTION, MINIMALIST_RECORD)
class BucketViewTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets'
record_url = '/buckets/beers'
def setUp(self):
super(BucketViewTest, self).setUp()
bucket = MINIMALIST_BUCKET.copy()
resp = self.app.put_json(self.record_url,
bucket,
headers=self.headers)
self.record = resp.json['data']
def test_buckets_are_global_to_every_users(self):
self.app.patch_json(self.record_url,
{'permissions': {'read': [Authenticated]}},
headers=self.headers)
self.app.get(self.record_url, headers=get_user_headers('alice'))
def test_buckets_do_not_support_post(self):
self.app.post(self.collection_url, headers=self.headers,
status=405)
def test_buckets_can_be_put_with_simple_name(self):
self.assertEqual(self.record['id'], 'beers')
def test_nobody_can_list_buckets_by_default(self):
self.app.get(self.collection_url,
headers=get_user_headers('alice'),
status=403)
def test_nobody_can_read_bucket_information_by_default(self):
self.app.get(self.record_url,
headers=get_user_headers('alice'),
status=403)
def test_buckets_name_should_be_simple(self):
self.app.put_json('/buckets/__beers__',
MINIMALIST_BUCKET,
headers=self.headers,
status=400)
def test_create_permissions_can_be_added_on_buckets(self):
bucket = MINIMALIST_BUCKET.copy()
bucket['permissions'] = {'collection:create': ['fxa:user'],
'group:create': ['fxa:user']}
resp = self.app.put_json('/buckets/beers',
bucket,
headers=self.headers,
status=200)
permissions = resp.json['permissions']
self.assertIn('fxa:user', permissions['collection:create'])
self.assertIn('fxa:user', permissions['group:create'])
def test_wrong_create_permissions_cannot_be_added_on_buckets(self):
bucket = MINIMALIST_BUCKET.copy()
bucket['permissions'] = {'record:create': ['fxa:user']}
self.app.put_json('/buckets/beers',
bucket,
headers=self.headers,
status=400)
class BucketReadPermissionTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets'
record_url = '/buckets/beers'
def setUp(self):
super(BucketReadPermissionTest, self).setUp()
bucket = MINIMALIST_BUCKET.copy()
self.app.put_json(self.record_url,
bucket,
headers=self.headers)
def get_app_settings(self, extra=None):
settings = super(BucketReadPermissionTest,
self).get_app_settings(extra)
# Give the right to list buckets (for self.principal and alice).
settings['cliquet.bucket_read_principals'] = Authenticated
return settings
def test_bucket_collection_endpoint_lists_them_all_for_everyone(self):
resp = self.app.get(self.collection_url,
headers=get_user_headers('alice'))
records = resp.json['data']
self.assertEqual(len(records), 1)
self.assertEqual(records[0]['id'], 'beers')
def test_everyone_can_read_bucket_information(self):
resp = self.app.get(self.record_url, headers=get_user_headers('alice'))
record = resp.json['data']
self.assertEqual(record['id'], 'beers')
class BucketDeletionTest(BaseWebTest, unittest.TestCase):
bucket_url = '/buckets/beers'
collection_url = '/buckets/beers/collections/barley'
group_url = '/buckets/beers/groups/moderators'
def setUp(self):
# Create a bucket with some objects.
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.put_json(self.group_url, MINIMALIST_GROUP,
headers=self.headers)
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
r = self.app.post_json(self.collection_url + '/records',
MINIMALIST_RECORD,
headers=self.headers)
record_id = r.json['data']['id']
self.record_url = self.collection_url + '/records/%s' % record_id
# Delete the bucket.
self.app.delete(self.bucket_url, headers=self.headers)
def get_app_settings(self, extra=None):
settings = super(BucketDeletionTest, self).get_app_settings(extra)
# Give the permission to read, to get an explicit 404 once deleted.
settings['cliquet.bucket_read_principals'] = self.principal
return settings
def test_buckets_can_be_deleted(self):
self.app.get(self.bucket_url, headers=self.headers,
status=404)
def test_every_collections_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.get(self.collection_url, headers=self.headers, status=404)
def test_every_groups_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.get(self.group_url, headers=self.headers, status=404)
def test_every_records_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
self.app.get(self.record_url, headers=self.headers, status=404)
| 1 | 7,711 | nit: I guess we can omit this (unless you had a reason to specify it) | Kinto-kinto | py |
@@ -74,6 +74,11 @@ func (c *client) isSolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote != nil
}
+// Returns true if this is a solicited leafnode and is not configured to be treated as a hub.
+func (c *client) isSpokeLeafNode() bool {
+ return c.kind == LEAF && c.leaf.remote != nil && !c.leaf.remote.Hub
+}
+
func (c *client) isUnsolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote == nil
} | 1 | // Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/url"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
// Warning when user configures leafnode TLS insecure
const leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
// When a loop is detected, delay the reconnect of solicited connection.
const leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
// Prefix for loop detection subject
const leafNodeLoopDetectionSubjectPrefixOld = "lds."
const leafNodeLoopDetectionSubjectPrefix = "$" + leafNodeLoopDetectionSubjectPrefixOld
type leaf struct {
// Used to suppress sub and unsub interest. Same as routes but our audience
// here is tied to this leaf node. This will hold all subscriptions except this
	// leaf node's. This represents all the interest we want to send to the other side.
smap map[string]int32
// We have any auth stuff here for solicited connections.
remote *leafNodeCfg
}
// Used for remote (solicited) leafnodes.
type leafNodeCfg struct {
sync.RWMutex
*RemoteLeafOpts
urls []*url.URL
curURL *url.URL
tlsName string
username string
password string
loopDelay time.Duration // A loop condition was detected
}
// Check to see if this is a solicited leafnode. We do special processing for solicited.
func (c *client) isSolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote != nil
}
func (c *client) isUnsolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote == nil
}
// This will spin up go routines to solicit the remote leaf node connections.
func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) {
for _, r := range remotes {
remote := newLeafNodeCfg(r)
s.startGoRoutine(func() { s.connectToRemoteLeafNode(remote, true) })
}
}
func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool {
for _, ri := range s.getOpts().LeafNode.Remotes {
// FIXME(dlc) - What about auth changes?
if reflect.DeepEqual(ri.URLs, remote.URLs) {
return true
}
}
return false
}
// Ensure that leafnode is properly configured.
func validateLeafNode(o *Options) error {
if err := validateLeafNodeAuthOptions(o); err != nil {
return err
}
if o.LeafNode.Port == 0 {
return nil
}
if o.Gateway.Name == "" && o.Gateway.Port == 0 {
return nil
}
// If we are here we have both leaf nodes and gateways defined, make sure there
// is a system account defined.
if o.SystemAccount == "" {
return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured")
}
return nil
}
// Used to validate user names in LeafNode configuration.
// - rejects mix of single and multiple users.
// - rejects duplicate user names.
func validateLeafNodeAuthOptions(o *Options) error {
if len(o.LeafNode.Users) == 0 {
return nil
}
if o.LeafNode.Username != _EMPTY_ {
return fmt.Errorf("can not have a single user/pass and a users array")
}
users := map[string]struct{}{}
for _, u := range o.LeafNode.Users {
if _, exists := users[u.Username]; exists {
return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username)
}
users[u.Username] = struct{}{}
}
return nil
}
func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) {
delay := s.getOpts().LeafNode.ReconnectInterval
select {
case <-time.After(delay):
case <-s.quitCh:
s.grWG.Done()
return
}
s.connectToRemoteLeafNode(remote, false)
}
// Creates a leafNodeCfg object that wraps the RemoteLeafOpts.
func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
cfg := &leafNodeCfg{
RemoteLeafOpts: remote,
urls: make([]*url.URL, 0, len(remote.URLs)),
}
// Start with the one that is configured. We will add to this
// array when receiving async leafnode INFOs.
cfg.urls = append(cfg.urls, cfg.URLs...)
// If we are TLS make sure we save off a proper servername if possible.
// Do same for user/password since we may need them to connect to
// a bare URL that we get from INFO protocol.
for _, u := range cfg.urls {
cfg.saveTLSHostname(u)
cfg.saveUserPassword(u)
}
return cfg
}
// Will pick an URL from the list of available URLs.
func (cfg *leafNodeCfg) pickNextURL() *url.URL {
cfg.Lock()
defer cfg.Unlock()
// If the current URL is the first in the list and we have more than
// one URL, then move that one to end of the list.
if cfg.curURL != nil && len(cfg.urls) > 1 && urlsAreEqual(cfg.curURL, cfg.urls[0]) {
first := cfg.urls[0]
copy(cfg.urls, cfg.urls[1:])
cfg.urls[len(cfg.urls)-1] = first
}
cfg.curURL = cfg.urls[0]
return cfg.curURL
}
// Returns the current URL
func (cfg *leafNodeCfg) getCurrentURL() *url.URL {
cfg.RLock()
defer cfg.RUnlock()
return cfg.curURL
}
// Returns how long the server should wait before attempting
// to solicit a remote leafnode connection following the
// detection of a loop.
// Returns 0 if no loop was detected.
func (cfg *leafNodeCfg) getLoopDelay() time.Duration {
cfg.RLock()
delay := cfg.loopDelay
cfg.RUnlock()
return delay
}
// Reset the loop delay.
func (cfg *leafNodeCfg) resetLoopDelay() {
cfg.Lock()
cfg.loopDelay = 0
cfg.Unlock()
}
// Ensure that non-exported options (used in tests) have
// been properly set.
func (s *Server) setLeafNodeNonExportedOptions() {
opts := s.getOpts()
s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout
if s.leafNodeOpts.dialTimeout == 0 {
// Use same timeouts as routes for now.
s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL
}
s.leafNodeOpts.resolver = opts.LeafNode.resolver
if s.leafNodeOpts.resolver == nil {
s.leafNodeOpts.resolver = net.DefaultResolver
}
}
func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) {
defer s.grWG.Done()
if remote == nil || len(remote.URLs) == 0 {
s.Debugf("Empty remote leafnode definition, nothing to connect")
return
}
opts := s.getOpts()
reconnectDelay := opts.LeafNode.ReconnectInterval
s.mu.Lock()
dialTimeout := s.leafNodeOpts.dialTimeout
resolver := s.leafNodeOpts.resolver
s.mu.Unlock()
if loopDelay := remote.getLoopDelay(); loopDelay > 0 {
select {
case <-time.After(loopDelay):
case <-s.quitCh:
return
}
remote.resetLoopDelay()
}
var conn net.Conn
const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v"
attempts := 0
for s.isRunning() && s.remoteLeafNodeStillValid(remote) {
rURL := remote.pickNextURL()
url, err := s.getRandomIP(resolver, rURL.Host)
if err == nil {
var ipStr string
if url != rURL.Host {
ipStr = fmt.Sprintf(" (%s)", url)
}
s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr)
conn, err = net.DialTimeout("tcp", url, dialTimeout)
}
if err != nil {
attempts++
if s.shouldReportConnectErr(firstConnect, attempts) {
s.Errorf(connErrFmt, rURL.Host, attempts, err)
} else {
s.Debugf(connErrFmt, rURL.Host, attempts, err)
}
select {
case <-s.quitCh:
return
case <-time.After(reconnectDelay):
continue
}
}
if !s.remoteLeafNodeStillValid(remote) {
conn.Close()
return
}
// We have a connection here to a remote server.
// Go ahead and create our leaf node and return.
s.createLeafNode(conn, remote)
// We will put this in the normal log if first connect, does not force -DV mode to know
// that the connect worked.
if firstConnect {
s.Noticef("Connected leafnode to %q", rURL.Host)
}
return
}
}
// Save off the tlsName for when we use TLS and mix hostnames and IPs. IPs usually
// come from the server we connect to.
func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) {
isTLS := cfg.TLSConfig != nil || u.Scheme == "tls"
if isTLS && cfg.tlsName == "" && net.ParseIP(u.Hostname()) == nil {
cfg.tlsName = u.Hostname()
}
}
// Save off the username/password for when we connect using a bare URL
// that we get from the INFO protocol.
func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) {
if cfg.username == _EMPTY_ && u.User != nil {
cfg.username = u.User.Username()
cfg.password, _ = u.User.Password()
}
}
// This is the leafnode's accept loop. This runs as a go-routine.
// The listen specification is resolved (if use of random port),
// then a listener is started. After that, this routine enters
// a loop (until the server is shutdown) accepting incoming
// leaf node connections from remote servers.
func (s *Server) leafNodeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
port := opts.LeafNode.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e)
return
}
s.Noticef("Listening for leafnode connections on %s",
net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
s.mu.Lock()
tlsRequired := opts.LeafNode.TLSConfig != nil
tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
info := Info{
ID: s.info.ID,
Version: s.info.Version,
GitCommit: gitCommit,
GoVersion: runtime.Version(),
AuthRequired: true,
TLSRequired: tlsRequired,
TLSVerify: tlsVerify,
MaxPayload: s.info.MaxPayload, // TODO(dlc) - Allow override?
Proto: 1, // Fixed for now.
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port
}
s.leafNodeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setLeafNodeInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", s.opts.LeafNode.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Add our LeafNode URL to the list that we send to servers connecting
// to our LeafNode accept URL. This call also regenerates leafNodeInfoJSON.
s.addLeafNodeURL(s.leafNodeInfo.IP)
// Setup state that can enable shutdown
s.leafNodeListener = l
// As of now, a server that does not have remotes configured would
// never solicit a connection, so we should not have to warn if
// InsecureSkipVerify is set in main LeafNodes config (since
// this TLS setting matters only when soliciting a connection).
// Still, warn if insecure is set in any of LeafNode block.
// We need to check remotes, even if tls is not required on accept.
warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify
if !warn {
for _, r := range opts.LeafNode.Remotes {
if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify {
warn = true
break
}
}
}
if warn {
s.Warnf(leafnodeTLSInsecureWarning)
}
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
tmpDelay = s.acceptError("LeafNode", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createLeafNode(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Leafnode accept loop exiting..")
s.done <- true
}
// RegEx to match a creds file with user JWT and Seed.
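// An illustrative creds file layout (placeholder values only): the regex
// captures the contents of dash-delimited blocks, and sendLeafConnect treats
// the first capture as the user JWT and the second as the nkey seed.
//
//	-----BEGIN NATS USER JWT-----
//	<user JWT>
//	------END NATS USER JWT------
//
//	-----BEGIN USER NKEY SEED-----
//	<nkey seed>
//	------END USER NKEY SEED------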
var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
// Lock should be held entering here.
func (c *client) sendLeafConnect(tlsRequired bool) {
// We support basic user/pass and operator based user JWT with signatures.
cinfo := leafConnectInfo{
TLS: tlsRequired,
Name: c.srv.info.ID,
}
// Check for credentials first, that will take precedence..
if creds := c.leaf.remote.Credentials; creds != "" {
c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
contents, err := ioutil.ReadFile(creds)
if err != nil {
c.Errorf("%v", err)
return
}
defer wipeSlice(contents)
items := credsRe.FindAllSubmatch(contents, -1)
if len(items) < 2 {
c.Errorf("Credentials file malformed")
return
}
// First result should be the user JWT.
// We copy here so that the file containing the seed will be wiped appropriately.
raw := items[0][1]
tmp := make([]byte, len(raw))
copy(tmp, raw)
// Seed is second item.
kp, err := nkeys.FromSeed(items[1][1])
if err != nil {
c.Errorf("Credentials file has malformed seed")
return
}
// Wipe our key on exit.
defer kp.Wipe()
sigraw, _ := kp.Sign(c.nonce)
sig := base64.RawURLEncoding.EncodeToString(sigraw)
cinfo.JWT = string(tmp)
cinfo.Sig = sig
} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
cinfo.User = userInfo.Username()
cinfo.Pass, _ = userInfo.Password()
} else if c.leaf.remote.username != _EMPTY_ {
cinfo.User = c.leaf.remote.username
cinfo.Pass = c.leaf.remote.password
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection(ProtocolViolation)
return
}
// Although this call is made before the writeLoop is created,
// we don't really need to send in place. The protocol will be
// sent out by the writeLoop.
c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
}
// Makes a deep copy of the LeafNode Info structure.
// The server lock is held on entry.
func (s *Server) copyLeafNodeInfo() *Info {
clone := s.leafNodeInfo
// Copy the array of urls.
if len(s.leafNodeInfo.LeafNodeURLs) > 0 {
clone.LeafNodeURLs = append([]string(nil), s.leafNodeInfo.LeafNodeURLs...)
}
return &clone
}
// Adds a LeafNode URL that we get when a route connects to the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was added or not.
// Server lock is held on entry
func (s *Server) addLeafNodeURL(urlStr string) bool {
// Make sure we already don't have it.
for _, url := range s.leafNodeInfo.LeafNodeURLs {
if url == urlStr {
return false
}
}
s.leafNodeInfo.LeafNodeURLs = append(s.leafNodeInfo.LeafNodeURLs, urlStr)
s.generateLeafNodeInfoJSON()
return true
}
// Removes a LeafNode URL of the route that is disconnecting from the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was removed or not.
// Server lock is held on entry.
func (s *Server) removeLeafNodeURL(urlStr string) bool {
// Don't need to do this if we are removing the route connection because
	// we are shutting down...
if s.shutdown {
return false
}
removed := false
urls := s.leafNodeInfo.LeafNodeURLs
for i, url := range urls {
if url == urlStr {
// If not last, move last into the position we remove.
last := len(urls) - 1
if i != last {
urls[i] = urls[last]
}
s.leafNodeInfo.LeafNodeURLs = urls[0:last]
removed = true
break
}
}
if removed {
s.generateLeafNodeInfoJSON()
}
return removed
}
func (s *Server) generateLeafNodeInfoJSON() {
b, _ := json.Marshal(s.leafNodeInfo)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
s.leafNodeInfoJSON = bytes.Join(pcs, []byte(" "))
}
// Sends an async INFO protocol so that the connected servers can update
// their list of LeafNode urls.
func (s *Server) sendAsyncLeafNodeInfo() {
for _, c := range s.leafs {
c.mu.Lock()
c.enqueueProto(s.leafNodeInfoJSON)
c.mu.Unlock()
}
}
// Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode.
func (s *Server) createLeafNode(conn net.Conn, remote *leafNodeCfg) *client {
// Snapshot server options.
opts := s.getOpts()
maxPay := int32(opts.MaxPayload)
maxSubs := int32(opts.MaxSubs)
// For system, maxSubs of 0 means unlimited, so re-adjust here.
if maxSubs == 0 {
maxSubs = -1
}
now := time.Now()
c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
c.leaf = &leaf{smap: map[string]int32{}}
// Determines if we are soliciting the connection or not.
var solicited bool
c.mu.Lock()
c.initClient()
if remote != nil {
solicited = true
// Users can bind to any local account, if its empty
// we will assume the $G account.
if remote.LocalAccount == "" {
remote.LocalAccount = globalAccountName
}
c.leaf.remote = remote
c.mu.Unlock()
// TODO: Decide what should be the optimal behavior here.
// For now, if lookup fails, we will constantly try
// to recreate this LN connection.
acc, err := s.LookupAccount(remote.LocalAccount)
if err != nil {
c.Errorf("No local account %q for leafnode: %v", remote.LocalAccount, err)
c.closeConnection(MissingAccount)
return nil
}
c.mu.Lock()
c.acc = acc
} else {
c.flags.set(expectConnect)
}
c.mu.Unlock()
var nonce [nonceLen]byte
// Grab server variables
s.mu.Lock()
info := s.copyLeafNodeInfo()
if !solicited {
s.generateNonce(nonce[:])
}
s.mu.Unlock()
// Grab lock
c.mu.Lock()
if solicited {
// We need to wait here for the info, but not for too long.
c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
info, err := br.ReadString('\n')
if err != nil {
c.mu.Unlock()
if err == io.EOF {
c.closeConnection(ClientClosed)
} else {
c.closeConnection(ReadError)
}
return nil
}
c.nc.SetReadDeadline(time.Time{})
c.mu.Unlock()
// Handle only connection to wrong port here, others will be handled below.
if err := c.parse([]byte(info)); err == ErrConnectedToWrongPort {
c.Errorf(err.Error())
c.closeConnection(WrongPort)
return nil
}
c.mu.Lock()
if !c.flags.isSet(infoReceived) {
c.mu.Unlock()
c.Errorf("Did not get the remote leafnode's INFO, timed-out")
c.closeConnection(ReadError)
return nil
}
// Do TLS here as needed.
tlsRequired := remote.TLS || remote.TLSConfig != nil
if tlsRequired {
c.Debugf("Starting TLS leafnode client handshake")
// Specify the ServerName we are expecting.
var tlsConfig *tls.Config
if remote.TLSConfig != nil {
tlsConfig = remote.TLSConfig.Clone()
} else {
tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
var host string
// If ServerName was given to us from the option, use that, always.
if tlsConfig.ServerName == "" {
url := remote.getCurrentURL()
host = url.Hostname()
// We need to check if this host is an IP. If so, we probably
// had this advertised to us and should use the configured host
// name for the TLS server name.
if remote.tlsName != "" && net.ParseIP(host) != nil {
host = remote.tlsName
}
tlsConfig.ServerName = host
}
c.nc = tls.Client(c.nc, tlsConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
var wait time.Duration
if remote.TLSTimeout == 0 {
wait = TLS_TIMEOUT
} else {
wait = secondsToDuration(remote.TLSTimeout)
}
time.AfterFunc(wait, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(wait))
// Force handshake
c.mu.Unlock()
if err = conn.Handshake(); err != nil {
if solicited {
// If we overrode and used the saved tlsName but that failed
// we will clear that here. This is for the case that another server
// does not have the same tlsName, maybe only IPs.
// https://github.com/nats-io/nats-server/issues/1256
if _, ok := err.(x509.HostnameError); ok {
remote.Lock()
if host == remote.tlsName {
remote.tlsName = ""
}
remote.Unlock()
}
}
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
}
c.sendLeafConnect(tlsRequired)
c.Debugf("Remote leafnode connect msg sent")
} else {
// Send our info to the other side.
// Remember the nonce we sent here for signatures, etc.
c.nonce = make([]byte, nonceLen)
copy(c.nonce, nonce[:])
info.Nonce = string(c.nonce)
info.CID = c.cid
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
// We have to send from this go routine because we may
// have to block for TLS handshake before we start our
// writeLoop go routine. The other side needs to receive
// this before it can initiate the TLS handshake..
c.sendProtoNow(bytes.Join(pcs, []byte(" ")))
// Check to see if we need to spin up TLS.
if info.TLSRequired {
c.Debugf("Starting TLS leafnode server handshake")
c.nc = tls.Server(c.nc, opts.LeafNode.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.LeafNode.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
}
// Leaf nodes will always require a CONNECT to let us know
// when we are properly bound to an account.
// The connection may have been closed
if !c.isClosed() {
c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout))
}
}
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
// Set the Ping timer
s.setFirstPingTimer(c)
c.mu.Unlock()
c.Debugf("Leafnode connection created")
// Update server's accounting here if we solicited.
// Also send our local subs.
if solicited {
// Make sure we register with the account here.
c.registerWithAccount(c.acc)
s.addLeafNodeConnection(c)
s.initLeafNodeSmap(c)
c.sendAllLeafSubs()
}
return c
}
func (c *client) processLeafnodeInfo(info *Info) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.leaf == nil || c.isClosed() {
return nil
}
// Mark that the INFO protocol has been received.
// Note: For now, only the initial INFO has a nonce. We
// will probably do auto key rotation at some point.
if c.flags.setIfNotSet(infoReceived) {
// Prevent connecting to non leafnode port. Need to do this only for
// the first INFO, not for async INFO updates...
//
// Content of INFO sent by the server when accepting a tcp connection.
// -------------------------------------------------------------------
// Listen Port Of | CID | ClientConnectURLs | LeafNodeURLs | Gateway |
// -------------------------------------------------------------------
// CLIENT | X* | X** | | |
// ROUTE | | X** | X*** | |
// GATEWAY | | | | X |
// LEAFNODE | X | | X | |
// -------------------------------------------------------------------
// * Not on older servers.
// ** Not if "no advertise" is enabled.
// *** Not if leafnode's "no advertise" is enabled.
//
// As seen from above, a solicited LeafNode connection should receive
// from the remote server an INFO with CID and LeafNodeURLs. Anything
// else should be considered an attempt to connect to a wrong port.
if c.leaf.remote != nil && (info.CID == 0 || info.LeafNodeURLs == nil) {
return ErrConnectedToWrongPort
}
// Capture a nonce here.
c.nonce = []byte(info.Nonce)
if info.TLSRequired && c.leaf.remote != nil {
c.leaf.remote.TLS = true
}
}
	// For both initial INFO and async INFO protocols, possibly
// update our list of remote leafnode URLs we can connect to.
if c.leaf.remote != nil && len(info.LeafNodeURLs) > 0 {
// Consider the incoming array as the most up-to-date
// representation of the remote cluster's list of URLs.
c.updateLeafNodeURLs(info)
}
return nil
}
// When getting a leaf node INFO protocol, use the provided
// array of urls to update the list of possible endpoints.
func (c *client) updateLeafNodeURLs(info *Info) {
cfg := c.leaf.remote
cfg.Lock()
defer cfg.Unlock()
cfg.urls = make([]*url.URL, 0, 1+len(info.LeafNodeURLs))
// Add the ones we receive in the protocol
for _, surl := range info.LeafNodeURLs {
url, err := url.Parse("nats-leaf://" + surl)
if err != nil {
c.Errorf("Error parsing url %q: %v", surl, err)
continue
}
// Do not add if it's the same as what we already have configured.
var dup bool
for _, u := range cfg.URLs {
// URLs that we receive never have user info, but the
// ones that were configured may have. Simply compare
// host and port to decide if they are equal or not.
if url.Host == u.Host && url.Port() == u.Port() {
dup = true
break
}
}
if !dup {
cfg.urls = append(cfg.urls, url)
cfg.saveTLSHostname(url)
}
}
// Add the configured one
cfg.urls = append(cfg.urls, cfg.URLs...)
}
// Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo.
func (s *Server) setLeafNodeInfoHostPortAndIP() error {
opts := s.getOpts()
if opts.LeafNode.Advertise != _EMPTY_ {
advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port)
if err != nil {
return err
}
s.leafNodeInfo.Host = advHost
s.leafNodeInfo.Port = advPort
} else {
s.leafNodeInfo.Host = opts.LeafNode.Host
s.leafNodeInfo.Port = opts.LeafNode.Port
// If the host is "0.0.0.0" or "::" we need to resolve to a public IP.
// This will return at most 1 IP.
hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false)
if err != nil {
return err
}
if hostIsIPAny {
if len(ips) == 0 {
s.Errorf("Could not find any non-local IP for leafnode's listen specification %q",
s.leafNodeInfo.Host)
} else {
// Take the first from the list...
s.leafNodeInfo.Host = ips[0]
}
}
}
// Use just host:port for the IP
s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port))
if opts.LeafNode.Advertise != _EMPTY_ {
s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP)
}
return nil
}
func (s *Server) addLeafNodeConnection(c *client) {
c.mu.Lock()
cid := c.cid
c.mu.Unlock()
s.mu.Lock()
s.leafs[cid] = c
s.mu.Unlock()
}
func (s *Server) removeLeafNodeConnection(c *client) {
c.mu.Lock()
cid := c.cid
c.mu.Unlock()
s.mu.Lock()
delete(s.leafs, cid)
s.mu.Unlock()
}
type leafConnectInfo struct {
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Comp bool `json:"compression,omitempty"`
Name string `json:"name,omitempty"`
// Just used to detect wrong connection attempts.
Gateway string `json:"gateway,omitempty"`
}
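// sendLeafConnect marshals this struct and sends it as the payload of the
// CONNECT protocol line; an illustrative payload (placeholder values) is:
//
//	{"tls_required":true,"name":"<server id>","user":"leaf","pass":"<password>"}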
// processLeafNodeConnect will process the inbound connect args.
// Once we are here we are bound to an account, so can send any interest that
// we would have to the other side.
func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error {
// Way to detect clients that incorrectly connect to the route listen
// port. Client provided "lang" in the CONNECT protocol while LEAFNODEs don't.
if lang != "" {
c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
c.closeConnection(WrongPort)
return ErrClientConnectedToLeafNodePort
}
// Unmarshal as a leaf node connect protocol
proto := &leafConnectInfo{}
if err := json.Unmarshal(arg, proto); err != nil {
return err
}
// Reject if this has Gateway which means that it would be from a gateway
// connection that incorrectly connects to the leafnode port.
if proto.Gateway != "" {
errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongGateway)
return ErrWrongGateway
}
// Leaf Nodes do not do echo or verbose or pedantic.
c.opts.Verbose = false
c.opts.Echo = false
c.opts.Pedantic = false
// Create and initialize the smap since we know our bound account now.
lm := s.initLeafNodeSmap(c)
// We are good to go, send over all the bound account subscriptions.
if lm <= 128 {
c.sendAllLeafSubs()
} else {
s.startGoRoutine(func() {
c.sendAllLeafSubs()
s.grWG.Done()
})
}
// Add in the leafnode here since we passed through auth at this point.
s.addLeafNodeConnection(c)
// Announce the account connect event for a leaf node.
// This will no-op as needed.
s.sendLeafNodeConnect(c.acc)
return nil
}
// Snapshot the current subscriptions from the sublist into our smap which
// we will keep updated from now on.
func (s *Server) initLeafNodeSmap(c *client) int {
acc := c.acc
if acc == nil {
c.Debugf("Leafnode does not have an account bound")
return 0
}
// Collect all account subs here.
_subs := [32]*subscription{}
subs := _subs[:0]
ims := []string{}
acc.mu.Lock()
accName := acc.Name
// If we are solicited we only send interest for local clients.
if c.isSolicitedLeafNode() {
acc.sl.localSubs(&subs)
} else {
acc.sl.All(&subs)
}
// Since leaf nodes only send on interest, if the bound
// account has import services we need to send those over.
for isubj := range acc.imports.services {
ims = append(ims, isubj)
}
// Create a unique subject that will be used for loop detection.
lds := acc.lds
if lds == _EMPTY_ {
lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next()
acc.lds = lds
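		// The resulting subject looks like "$lds.<nuid>" (the loop detection
		// prefix followed by a NUID); shown here only for illustration.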
}
acc.mu.Unlock()
// Now check for gateway interest. Leafnodes will put this into
// the proper mode to propagate, but they are not held in the account.
gwsa := [16]*client{}
gws := gwsa[:0]
s.getOutboundGatewayConnections(&gws)
for _, cgw := range gws {
cgw.mu.Lock()
gw := cgw.gw
cgw.mu.Unlock()
if gw != nil {
if ei, _ := gw.outsim.Load(accName); ei != nil {
if e := ei.(*outsie); e != nil && e.sl != nil {
e.sl.All(&subs)
}
}
}
}
applyGlobalRouting := s.gateway.enabled
if c.isSolicitedLeafNode() {
// Add a fake subscription for this solicited leafnode connection
// so that we can send back directly for mapped GW replies.
c.srv.gwLeafSubs.Insert(&subscription{client: c, subject: []byte(gwReplyPrefix + ">")})
}
// Now walk the results and add them to our smap
c.mu.Lock()
for _, sub := range subs {
// We ignore ourselves here.
if c != sub.client {
c.leaf.smap[keyFromSub(sub)]++
}
}
// FIXME(dlc) - We need to update appropriately on an account claims update.
for _, isubj := range ims {
c.leaf.smap[isubj]++
}
// If we have gateways enabled we need to make sure the other side sends us responses
// that have been augmented from the original subscription.
// TODO(dlc) - Should we lock this down more?
if applyGlobalRouting {
c.leaf.smap[oldGWReplyPrefix+"*.>"]++
c.leaf.smap[gwReplyPrefix+">"]++
}
// Detect loop by subscribing to a specific subject and checking
// if this is coming back to us.
if c.leaf.remote == nil {
c.leaf.smap[lds]++
}
lenMap := len(c.leaf.smap)
c.mu.Unlock()
return lenMap
}
// updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-.
func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) {
acc, err := s.LookupAccount(accName)
if acc == nil || err != nil {
s.Debugf("No or bad account for %q, failed to update interest from gateway", accName)
return
}
s.updateLeafNodes(acc, sub, delta)
}
// updateLeafNodes will make sure to update the smap for the subscription. Will
// also forward to all leaf nodes as needed.
func (s *Server) updateLeafNodes(acc *Account, sub *subscription, delta int32) {
if acc == nil || sub == nil {
return
}
_l := [32]*client{}
leafs := _l[:0]
// Grab all leaf nodes. Ignore a leafnode if sub's client is a leafnode and matches.
acc.mu.RLock()
for _, ln := range acc.lleafs {
if ln != sub.client {
leafs = append(leafs, ln)
}
}
acc.mu.RUnlock()
for _, ln := range leafs {
ln.updateSmap(sub, delta)
}
}
// This will make an update to our internal smap and determine if we should send out
// an interest update to the remote side.
func (c *client) updateSmap(sub *subscription, delta int32) {
key := keyFromSub(sub)
c.mu.Lock()
// If we are solicited make sure this is a local client or a non-solicited leaf node
skind := sub.client.kind
if c.isSolicitedLeafNode() && !(skind == CLIENT || (skind == LEAF && !sub.client.isSolicitedLeafNode())) {
c.mu.Unlock()
return
}
n := c.leaf.smap[key]
// We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0.
update := sub.queue != nil || n == 0 || n+delta <= 0
n += delta
if n > 0 {
c.leaf.smap[key] = n
} else {
delete(c.leaf.smap, key)
}
if update {
c.sendLeafNodeSubUpdate(key, n)
}
c.mu.Unlock()
}
// Send the subscription interest change to the other side.
// Lock should be held.
func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
_b := [64]byte{}
b := bytes.NewBuffer(_b[:0])
c.writeLeafSub(b, key, n)
c.enqueueProto(b.Bytes())
}
// Helper function to build the key.
func keyFromSub(sub *subscription) string {
var _rkey [1024]byte
var key []byte
if sub.queue != nil {
// Just make the key subject spc group, e.g. 'foo bar'
key = _rkey[:0]
key = append(key, sub.subject...)
key = append(key, byte(' '))
key = append(key, sub.queue...)
} else {
key = sub.subject
}
return string(key)
}
// Send all subscriptions for this account that include local
// and possibly all other remote subscriptions.
func (c *client) sendAllLeafSubs() {
// Hold all at once for now.
var b bytes.Buffer
c.mu.Lock()
for key, n := range c.leaf.smap {
c.writeLeafSub(&b, key, n)
}
buf := b.Bytes()
if len(buf) > 0 {
c.queueOutbound(buf)
c.flushSignal()
}
c.mu.Unlock()
}
// Lock should be held.
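// For illustration, a single emitted line looks like "LS+ foo" for a plain
// subscription, "LS+ foo bar 3" for queue group "bar" with weight 3, or
// "LS- foo" when interest is dropped, each terminated by CR LF.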
func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
if key == "" {
return
}
if n > 0 {
w.WriteString("LS+ " + key)
// Check for queue semantics, if found write n.
if strings.Contains(key, " ") {
w.WriteString(" ")
var b [12]byte
var i = len(b)
for l := n; l > 0; l /= 10 {
i--
b[i] = digits[l%10]
}
w.Write(b[i:])
if c.trace {
arg := fmt.Sprintf("%s %d", key, n)
c.traceOutOp("LS+", []byte(arg))
}
} else if c.trace {
c.traceOutOp("LS+", []byte(key))
}
} else {
w.WriteString("LS- " + key)
if c.trace {
c.traceOutOp("LS-", []byte(key))
}
}
w.WriteString(CR_LF)
}
// processLeafSub will process an inbound sub request for the remote leaf node.
func (c *client) processLeafSub(argo []byte) (err error) {
// Indicate activity.
c.in.subs++
srv := c.srv
if srv == nil {
return nil
}
// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 1:
sub.queue = nil
case 3:
sub.queue = args[1]
sub.qw = int32(parseSize(args[2]))
default:
return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
}
sub.subject = args[0]
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return nil
}
// Check permissions if applicable.
if !c.canExport(string(sub.subject)) {
c.mu.Unlock()
c.Debugf("Can not export %q, ignoring remote subscription request", sub.subject)
return nil
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil
}
// Like Routes, we store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
sub.sid = arg[:len(arg)-len(args[2])-1]
} else {
sub.sid = arg
}
key := string(sub.sid)
osub := c.subs[key]
updateGWs := false
acc := c.acc
if osub == nil {
subj := string(sub.subject)
accUnlock := false
// Check if we have a loop.
if len(subj) >= len(leafNodeLoopDetectionSubjectPrefixOld) {
subStripped := subj
if subStripped[0] == '$' {
subStripped = subStripped[1:]
}
if strings.HasPrefix(subStripped, leafNodeLoopDetectionSubjectPrefixOld) {
// The following check (involving acc.sl) and the later insert need to be tied together
// using the account lock, such that checking and modifying the sublist appear as one operation.
acc.mu.Lock()
accUnlock = true
// There is a loop if we receive our own subscription back.
loopFound := subj == acc.lds
if !loopFound {
// Or if a subscription from a different client already exists.
if res := acc.sl.Match(subj); res != nil && len(res.psubs)+len(res.qsubs) != 0 {
loopFound = true
}
}
if loopFound {
acc.mu.Unlock()
c.mu.Unlock()
srv.reportLeafNodeLoop(c)
return nil
}
}
}
c.subs[key] = sub
// Now place into the account sl.
err := acc.sl.Insert(sub)
if accUnlock {
acc.mu.Unlock()
}
if err != nil {
delete(c.subs, key)
c.mu.Unlock()
c.Errorf("Could not insert subscription: %v", err)
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
atomic.StoreInt32(&osub.qw, sub.qw)
acc.sl.UpdateRemoteQSub(osub)
}
solicited := c.isSolicitedLeafNode()
c.mu.Unlock()
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
// If we are not solicited, treat leaf node subscriptions similar to a
// client subscription, meaning we forward them to routes, gateways and
// other leaf nodes as needed.
if !solicited {
// If we are routing add to the route map for the associated account.
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates for other leaf nodes. We understand solicited
// and non-solicited state in this call so we will do the right thing.
srv.updateLeafNodes(acc, sub, 1)
return nil
}
func (s *Server) reportLeafNodeLoop(c *client) {
delay := leafNodeReconnectDelayAfterLoopDetected
opts := s.getOpts()
if opts.LeafNode.loopDelay != 0 {
delay = opts.LeafNode.loopDelay
}
c.mu.Lock()
if c.leaf.remote != nil {
c.leaf.remote.Lock()
c.leaf.remote.loopDelay = delay
c.leaf.remote.Unlock()
}
accName := c.acc.Name
c.mu.Unlock()
c.sendErrAndErr(fmt.Sprintf("Loop detected for leafnode account=%q. Delaying attempt to reconnect for %v",
accName, delay))
}
// processLeafUnsub will process an inbound unsub request for the remote leaf node.
func (c *client) processLeafUnsub(arg []byte) error {
// Indicate any activity, so pub and sub or unsubs.
c.in.subs++
acc := c.acc
srv := c.srv
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return nil
}
updateGWs := false
// We store local subs by account and subject and optionally queue name.
// LS- will have the arg exactly as the key.
sub, ok := c.subs[string(arg)]
c.mu.Unlock()
if ok {
c.unsubscribe(acc, sub, true, true)
updateGWs = srv.gateway.enabled
}
// If we are routing subtract from the route map for the associated account.
srv.updateRouteSubscriptionMap(acc, sub, -1)
// Gateways
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
// Now check on leafnode updates for other leaf nodes.
srv.updateLeafNodes(acc, sub, -1)
return nil
}
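// processLeafMsgArgs parses the argument section of an inbound leaf node
// message. Judging from the switch below, the accepted forms are
// (illustrative):
//
//	<subject> <size>
//	<subject> <reply> <size>
//	<subject> + <reply> [<queue> ...] <size>
//	<subject> | <queue> [<queue> ...] <size>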
func (c *client) processLeafMsgArgs(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 0, 1:
return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args)
case 2:
c.pa.reply = nil
c.pa.queues = nil
c.pa.szb = args[1]
c.pa.size = parseSize(args[1])
case 3:
c.pa.reply = args[1]
c.pa.queues = nil
c.pa.szb = args[2]
c.pa.size = parseSize(args[2])
default:
// args[1] is our reply indicator. Should be + or | normally.
if len(args[1]) != 1 {
return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
}
switch args[1][0] {
case '+':
c.pa.reply = args[2]
case '|':
c.pa.reply = nil
default:
return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
}
// Grab size.
c.pa.szb = args[len(args)-1]
c.pa.size = parseSize(c.pa.szb)
// Grab queue names.
if c.pa.reply != nil {
c.pa.queues = args[3 : len(args)-1]
} else {
c.pa.queues = args[2 : len(args)-1]
}
}
if c.pa.size < 0 {
return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args)
}
// Common ones processed after check for arg length
c.pa.subject = args[0]
return nil
}
// processInboundLeafMsg is called to process an inbound msg from a leaf node.
func (c *client) processInboundLeafMsg(msg []byte) {
// Update statistics
c.in.msgs++
// The msg includes the CR_LF, so pull back out for accounting.
c.in.bytes += int32(len(msg) - LEN_CR_LF)
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return
}
srv := c.srv
acc := c.acc
// Mostly under testing scenarios.
if srv == nil || acc == nil {
return
}
// Check to see if we need to map/route to another account.
if acc.imports.services != nil {
c.checkForImportServices(acc, msg)
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
// Collect queue names if needed.
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If we have queue subs in this cluster, then if we run in gateway
// mode and the remote gateways have queue subs, then we need to
// collect the queue groups this message was sent to so that we
// exclude them when sending to gateways.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
qnames = c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
}
}
| 1 | 10,194 | Why is Hub public? | nats-io-nats-server | go |
@@ -114,7 +114,6 @@ func (agent *ecsAgent) capabilities() ([]*ecs.Attribute, error) {
}
capabilities = agent.appendTaskENICapabilities(capabilities)
- capabilities = agent.appendENITrunkingCapabilities(capabilities)
capabilities = agent.appendDockerDependentCapabilities(capabilities, supportedVersions)
| 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package app
import (
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/aws-sdk-go/aws"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
const (
// capabilityPrefix is deprecated. For new capabilities, use attributePrefix.
capabilityPrefix = "com.amazonaws.ecs.capability."
attributePrefix = "ecs.capability."
capabilityTaskIAMRole = "task-iam-role"
capabilityTaskIAMRoleNetHost = "task-iam-role-network-host"
taskENIAttributeSuffix = "task-eni"
taskENIBlockInstanceMetadataAttributeSuffix = "task-eni-block-instance-metadata"
appMeshAttributeSuffix = "aws-appmesh"
cniPluginVersionSuffix = "cni-plugin-version"
capabilityTaskCPUMemLimit = "task-cpu-mem-limit"
capabilityDockerPluginInfix = "docker-plugin."
attributeSeparator = "."
capabilityPrivateRegistryAuthASM = "private-registry-authentication.secretsmanager"
capabilitySecretEnvSSM = "secrets.ssm.environment-variables"
capabilitySecretEnvASM = "secrets.asm.environment-variables"
capabiltyPIDAndIPCNamespaceSharing = "pid-ipc-namespace-sharing"
capabilityNvidiaDriverVersionInfix = "nvidia-driver-version."
capabilityECREndpoint = "ecr-endpoint"
capabilityContainerOrdering = "container-ordering"
taskEIAAttributeSuffix = "task-eia"
taskENITrunkingAttributeSuffix = "task-eni-trunking"
branchCNIPluginVersionSuffix = "branch-cni-plugin-version"
)
// capabilities returns the supported capabilities of this agent / docker-client pair.
// Currently, the following capabilities are possible:
//
// com.amazonaws.ecs.capability.privileged-container
// com.amazonaws.ecs.capability.docker-remote-api.1.17
// com.amazonaws.ecs.capability.docker-remote-api.1.18
// com.amazonaws.ecs.capability.docker-remote-api.1.19
// com.amazonaws.ecs.capability.docker-remote-api.1.20
// com.amazonaws.ecs.capability.logging-driver.json-file
// com.amazonaws.ecs.capability.logging-driver.syslog
// com.amazonaws.ecs.capability.logging-driver.fluentd
// com.amazonaws.ecs.capability.logging-driver.journald
// com.amazonaws.ecs.capability.logging-driver.gelf
// com.amazonaws.ecs.capability.logging-driver.none
// com.amazonaws.ecs.capability.selinux
// com.amazonaws.ecs.capability.apparmor
// com.amazonaws.ecs.capability.ecr-auth
// com.amazonaws.ecs.capability.task-iam-role
// com.amazonaws.ecs.capability.task-iam-role-network-host
// ecs.capability.docker-volume-driver.${driverName}
// ecs.capability.task-eni
// ecs.capability.task-eni-block-instance-metadata
// ecs.capability.execution-role-ecr-pull
// ecs.capability.execution-role-awslogs
// ecs.capability.container-health-check
// ecs.capability.private-registry-authentication.secretsmanager
// ecs.capability.secrets.ssm.environment-variables
// ecs.capability.pid-ipc-namespace-sharing
// ecs.capability.ecr-endpoint
// ecs.capability.secrets.asm.environment-variables
// ecs.capability.aws-appmesh
// ecs.capability.task-eia
// ecs.capability.task-eni-trunking
func (agent *ecsAgent) capabilities() ([]*ecs.Attribute, error) {
var capabilities []*ecs.Attribute
if !agent.cfg.PrivilegedDisabled {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"privileged-container")
}
supportedVersions := make(map[dockerclient.DockerVersion]bool)
// Determine API versions to report as supported. Supported versions are also used for capability-enablement, except
// logging drivers.
for _, version := range agent.dockerClient.SupportedVersions() {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"docker-remote-api."+string(version))
supportedVersions[version] = true
}
capabilities = agent.appendLoggingDriverCapabilities(capabilities)
if agent.cfg.SELinuxCapable {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"selinux")
}
if agent.cfg.AppArmorCapable {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"apparmor")
}
capabilities = agent.appendTaskIamRoleCapabilities(capabilities, supportedVersions)
capabilities, err := agent.appendTaskCPUMemLimitCapabilities(capabilities, supportedVersions)
if err != nil {
return nil, err
}
capabilities = agent.appendTaskENICapabilities(capabilities)
capabilities = agent.appendENITrunkingCapabilities(capabilities)
capabilities = agent.appendDockerDependentCapabilities(capabilities, supportedVersions)
// TODO: gate this on docker api version when ecs supported docker includes
// credentials endpoint feature from upstream docker
if agent.cfg.OverrideAWSLogsExecutionRole {
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"execution-role-awslogs")
}
capabilities = agent.appendVolumeDriverCapabilities(capabilities)
// ecs agent version 1.19.0 supports private registry authentication using
// aws secrets manager
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityPrivateRegistryAuthASM)
// ecs agent version 1.22.0 supports ecs secrets integrating with aws systems manager
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilitySecretEnvSSM)
// ecs agent version 1.22.0 supports sharing PID namespaces and IPC resource namespaces
// with host EC2 instance and among containers within the task
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabiltyPIDAndIPCNamespaceSharing)
if agent.cfg.GPUSupportEnabled {
capabilities = agent.appendNvidiaDriverVersionAttribute(capabilities)
}
// support ecr endpoint override
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityECREndpoint)
// ecs agent version 1.23.0 supports ecs secrets integrating with aws secrets manager
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilitySecretEnvASM)
// ecs agent version 1.26.0 supports aws-appmesh cni plugin
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+appMeshAttributeSuffix)
// support elastic inference in agent
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+taskEIAAttributeSuffix)
// support container ordering in agent
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityContainerOrdering)
return capabilities, nil
}
func (agent *ecsAgent) appendDockerDependentCapabilities(capabilities []*ecs.Attribute,
supportedVersions map[dockerclient.DockerVersion]bool) []*ecs.Attribute {
if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"ecr-auth")
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"execution-role-ecr-pull")
}
if _, ok := supportedVersions[dockerclient.Version_1_24]; ok && !agent.cfg.DisableDockerHealthCheck {
// Docker health check was added in API 1.24
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"container-health-check")
}
return capabilities
}
func (agent *ecsAgent) appendLoggingDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute {
knownVersions := make(map[dockerclient.DockerVersion]struct{})
// Determine known API versions. Known versions are used exclusively for logging-driver enablement, since none of
// the structural API elements change.
for _, version := range agent.dockerClient.KnownVersions() {
knownVersions[version] = struct{}{}
}
for _, loggingDriver := range agent.cfg.AvailableLoggingDrivers {
requiredVersion := dockerclient.LoggingDriverMinimumVersion[loggingDriver]
if _, ok := knownVersions[requiredVersion]; ok {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"logging-driver."+string(loggingDriver))
}
}
return capabilities
}
func (agent *ecsAgent) appendTaskIamRoleCapabilities(capabilities []*ecs.Attribute, supportedVersions map[dockerclient.DockerVersion]bool) []*ecs.Attribute {
if agent.cfg.TaskIAMRoleEnabled {
// The "task-iam-role" capability is supported for docker v1.7.x onwards
// Refer https://github.com/docker/docker/blob/master/docs/reference/api/docker_remote_api.md
// to lookup the table of docker supportedVersions to API supportedVersions
if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+capabilityTaskIAMRole)
} else {
seelog.Warn("Task IAM Role not enabled due to unsuppported Docker version")
}
}
if agent.cfg.TaskIAMRoleEnabledForNetworkHost {
// The "task-iam-role-network-host" capability is supported for docker v1.7.x onwards
if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+capabilityTaskIAMRoleNetHost)
} else {
seelog.Warn("Task IAM Role for Host Network not enabled due to unsuppported Docker version")
}
}
return capabilities
}
func (agent *ecsAgent) appendTaskCPUMemLimitCapabilities(capabilities []*ecs.Attribute, supportedVersions map[dockerclient.DockerVersion]bool) ([]*ecs.Attribute, error) {
if agent.cfg.TaskCPUMemLimit.Enabled() {
if _, ok := supportedVersions[dockerclient.Version_1_22]; ok {
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityTaskCPUMemLimit)
} else if agent.cfg.TaskCPUMemLimit == config.ExplicitlyEnabled {
// explicitly enabled -- return an error because we cannot fulfil an explicit request
return nil, errors.New("engine: Task CPU + Mem limit cannot be enabled due to unsupported Docker version")
} else {
// implicitly enabled -- don't register the capability, but degrade gracefully
seelog.Warn("Task CPU + Mem Limit disabled due to unsupported Docker version. API version 1.22 or greater is required.")
agent.cfg.TaskCPUMemLimit = config.ExplicitlyDisabled
}
}
return capabilities, nil
}
func (agent *ecsAgent) appendTaskENICapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute {
if agent.cfg.TaskENIEnabled {
// The assumption here is that all of the dependencies for supporting the
// Task ENI in the Agent have already been validated prior to the invocation of
// the `agent.capabilities()` call
capabilities = append(capabilities, &ecs.Attribute{
Name: aws.String(attributePrefix + taskENIAttributeSuffix),
})
taskENIVersionAttribute, err := agent.getTaskENIPluginVersionAttribute()
if err != nil {
return capabilities
}
capabilities = append(capabilities, taskENIVersionAttribute)
// We only care about AWSVPCBlockInstanceMetdata if Task ENI is enabled
if agent.cfg.AWSVPCBlockInstanceMetdata {
// If the Block Instance Metadata flag is set for AWS VPC networking mode, register a capability
// indicating the same
capabilities = append(capabilities, &ecs.Attribute{
Name: aws.String(attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix),
})
}
}
return capabilities
}
// getTaskENIPluginVersionAttribute returns the version information of the ECS
// CNI plugins. It just executes the ENI plugin as the assumption is that these
// plugins are packaged with the ECS Agent, which means all of the other plugins
// should also emit the same version information. Also, the version information
// doesn't contribute to placement decisions and just serves as additional
// debugging information
func (agent *ecsAgent) getTaskENIPluginVersionAttribute() (*ecs.Attribute, error) {
version, err := agent.cniClient.Version(ecscni.ECSENIPluginName)
if err != nil {
seelog.Warnf(
"Unable to determine the version of the plugin '%s': %v",
ecscni.ECSENIPluginName, err)
return nil, err
}
return &ecs.Attribute{
Name: aws.String(attributePrefix + cniPluginVersionSuffix),
Value: aws.String(version),
}, nil
}
func appendNameOnlyAttribute(attributes []*ecs.Attribute, name string) []*ecs.Attribute {
return append(attributes, &ecs.Attribute{Name: aws.String(name)})
}
| 1 | 22,279 | why is this deleted? | aws-amazon-ecs-agent | go |
@@ -1,5 +1,6 @@
<div class="<%= trail.unstarted? ? "unstarted" : "started" %>">
<section class="trail <%= topic_class(trail.topic) %>">
+ <p class="prerequisite">Hey, before you start this...Have you done the <a href="">Intro to Rails</a> tutorial?</p>
<header>
<span class="topic-label"><%= trail.topic.name %></span>
<h1><%= link_to trail.name, trail %></h1> | 1 | <div class="<%= trail.unstarted? ? "unstarted" : "started" %>">
<section class="trail <%= topic_class(trail.topic) %>">
<header>
<span class="topic-label"><%= trail.topic.name %></span>
<h1><%= link_to trail.name, trail %></h1>
<p class="description"><%= trail.description %></p>
<%= render "practice/trail_description_tooltip", trail: trail %>
</header>
<%= render "trails/step_dots", trail: trail %>
<% if trail.exercises.present? %>
<%= completeable_link trail.exercises.first.url, class: "start-trail" do %>
Start trail
<% end %>
<% end %>
</section>
</div>
| 1 | 14,141 | Maybe move that into a partial | thoughtbot-upcase | rb |
@@ -158,10 +158,13 @@ func TestBlockDAO(t *testing.T) {
require := require.New(t)
ctx := context.Background()
- dao := NewBlockDAO(kvstore, indexer, false, config.Default.DB)
+ testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
+ cfg := config.Default.DB
+ cfg.DbPath = testDBFile.Name()
+ dao := NewBlockDAO(kvstore, indexer, false, cfg)
require.NoError(dao.Start(ctx))
defer func() {
- require.NoError(dao.Stop(ctx))
+ dao.Stop(ctx)
}()
// receipts for the 3 blocks | 1 | package blockdao
import (
"context"
"hash/fnv"
"io/ioutil"
"math/big"
"math/rand"
"os"
"testing"
"time"
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
func getTestBlocks(t *testing.T) []*block.Block {
amount := uint64(50 << 22)
tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(28), 1, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(29), 2, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf3, err := testutil.SignedTransfer(identityset.Address(30).String(), identityset.PrivateKey(30), 3, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf4, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(28), 2, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf5, err := testutil.SignedTransfer(identityset.Address(30).String(), identityset.PrivateKey(29), 3, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf6, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(30), 4, big.NewInt(int64(amount)), nil, testutil.TestGasLimit, big.NewInt(0))
require.NoError(t, err)
// create testing executions
execution1, err := testutil.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(28), 1, big.NewInt(1), 0, big.NewInt(0), nil)
require.NoError(t, err)
execution2, err := testutil.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(29), 2, big.NewInt(0), 0, big.NewInt(0), nil)
require.NoError(t, err)
execution3, err := testutil.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(30), 3, big.NewInt(2), 0, big.NewInt(0), nil)
require.NoError(t, err)
hash1 := hash.Hash256{}
fnv.New32().Sum(hash1[:])
blk1, err := block.NewTestingBuilder().
SetHeight(1).
SetPrevBlockHash(hash1).
SetTimeStamp(testutil.TimestampNow().UTC()).
AddActions(tsf1, tsf4, execution1).
SignAndBuild(identityset.PrivateKey(27))
require.NoError(t, err)
hash2 := hash.Hash256{}
fnv.New32().Sum(hash2[:])
blk2, err := block.NewTestingBuilder().
SetHeight(2).
SetPrevBlockHash(hash2).
SetTimeStamp(testutil.TimestampNow().UTC()).
AddActions(tsf2, tsf5, execution2).
SignAndBuild(identityset.PrivateKey(27))
require.NoError(t, err)
hash3 := hash.Hash256{}
fnv.New32().Sum(hash3[:])
blk3, err := block.NewTestingBuilder().
SetHeight(3).
SetPrevBlockHash(hash3).
SetTimeStamp(testutil.TimestampNow().UTC()).
AddActions(tsf3, tsf6, execution3).
SignAndBuild(identityset.PrivateKey(27))
require.NoError(t, err)
return []*block.Block{&blk1, &blk2, &blk3}
}
func TestBlockDAO(t *testing.T) {
blks := getTestBlocks(t)
t1Hash := blks[0].Actions[0].Hash()
t4Hash := blks[0].Actions[1].Hash()
e1Hash := blks[0].Actions[2].Hash()
t2Hash := blks[1].Actions[0].Hash()
t5Hash := blks[1].Actions[1].Hash()
e2Hash := blks[1].Actions[2].Hash()
t3Hash := blks[2].Actions[0].Hash()
t6Hash := blks[2].Actions[1].Hash()
e3Hash := blks[2].Actions[2].Hash()
addr28 := hash.BytesToHash160(identityset.Address(28).Bytes())
addr29 := hash.BytesToHash160(identityset.Address(29).Bytes())
addr30 := hash.BytesToHash160(identityset.Address(30).Bytes())
addr31 := hash.BytesToHash160(identityset.Address(31).Bytes())
type index struct {
addr hash.Hash160
hashes [][]byte
}
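// daoTests lists the expected indexer state (total action count, ordered action hashes,
// and per-address action hashes) after each successive tip-block deletion; entry 0
// describes the full 3-block chain before any deletion.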
daoTests := []struct {
total uint64
hashTotal [][]byte
actions [4]index
}{
{
9,
[][]byte{t1Hash[:], t4Hash[:], e1Hash[:], t2Hash[:], t5Hash[:], e2Hash[:], t3Hash[:], t6Hash[:], e3Hash[:]},
[4]index{
{addr28, [][]byte{t1Hash[:], t4Hash[:], e1Hash[:], t6Hash[:]}},
{addr29, [][]byte{t4Hash[:], t2Hash[:], t5Hash[:], e2Hash[:]}},
{addr30, [][]byte{t5Hash[:], t3Hash[:], t6Hash[:], e3Hash[:]}},
{addr31, [][]byte{e1Hash[:], e2Hash[:], e3Hash[:]}},
},
},
{
6,
[][]byte{t1Hash[:], t4Hash[:], e1Hash[:], t2Hash[:], t5Hash[:], e2Hash[:]},
[4]index{
{addr28, [][]byte{t1Hash[:], t4Hash[:], e1Hash[:]}},
{addr29, [][]byte{t4Hash[:], t2Hash[:], t5Hash[:], e2Hash[:]}},
{addr30, [][]byte{t5Hash[:]}},
{addr31, [][]byte{e1Hash[:], e2Hash[:]}},
},
},
{
3,
[][]byte{t1Hash[:], t4Hash[:], e1Hash[:]},
[4]index{
{addr28, [][]byte{t1Hash[:], t4Hash[:], e1Hash[:]}},
{addr29, [][]byte{t4Hash[:]}},
{addr30, nil},
{addr31, [][]byte{e1Hash[:]}},
},
},
{
0,
nil,
[4]index{
{addr28, nil},
{addr29, nil},
{addr30, nil},
{addr31, nil},
},
},
}
testBlockDao := func(kvstore db.KVStore, indexer blockindex.Indexer, t *testing.T) {
require := require.New(t)
ctx := context.Background()
dao := NewBlockDAO(kvstore, indexer, false, config.Default.DB)
require.NoError(dao.Start(ctx))
defer func() {
require.NoError(dao.Stop(ctx))
}()
// receipts for the 3 blocks
receipts := [][]*action.Receipt{
{
{1, 1, t1Hash, 15, "1", []*action.Log{}},
{0, 1, t4Hash, 216, "2", []*action.Log{}},
{2, 1, e1Hash, 6, "3", []*action.Log{}},
},
{
{3, 2, t2Hash, 1500, "1", []*action.Log{}},
{5, 2, t5Hash, 34, "2", []*action.Log{}},
{9, 2, e2Hash, 655, "3", []*action.Log{}},
},
{
{7, 3, t3Hash, 488, "1", []*action.Log{}},
{6, 3, t6Hash, 2, "2", []*action.Log{}},
{2, 3, e3Hash, 1099, "3", []*action.Log{}},
},
}
height, err := indexer.GetBlockchainHeight()
require.NoError(err)
require.EqualValues(0, height)
for i := 0; i < 3; i++ {
// test putBlock/Receipt
blks[i].Receipts = receipts[i]
require.NoError(dao.PutBlock(blks[i]))
require.NoError(dao.Commit())
blks[i].Receipts = nil
// test getBlockchainHeight
height, err := indexer.GetBlockchainHeight()
require.NoError(err)
require.Equal(blks[i].Height(), height)
// test getTipHash
hash, err := dao.GetTipHash()
require.NoError(err)
require.Equal(blks[i].HashBlock(), hash)
// test getBlock()
blk, err := dao.GetBlock(blks[i].HashBlock())
require.NoError(err)
require.Equal(blks[i], blk)
}
// Test getReceiptByActionHash
for j := range daoTests[0].hashTotal {
h := hash.BytesToHash256(daoTests[0].hashTotal[j])
receipt, err := dao.GetReceiptByActionHash(h, uint64(j/3)+1)
require.NoError(err)
require.Equal(receipts[j/3][j%3], receipt)
action, err := dao.GetActionByActionHash(h, uint64(j/3)+1)
require.NoError(err)
require.Equal(h, action.Hash())
}
}
testDeleteDao := func(kvstore db.KVStore, indexer blockindex.Indexer, t *testing.T) {
require := require.New(t)
ctx := context.Background()
dao := NewBlockDAO(kvstore, indexer, false, config.Default.DB)
require.NoError(dao.Start(ctx))
defer func() {
require.NoError(dao.Stop(ctx))
}()
// put blocks
for i := 0; i < 3; i++ {
require.NoError(dao.PutBlock(blks[i]))
}
require.NoError(dao.Commit())
height, err := indexer.GetBlockchainHeight()
require.NoError(err)
require.EqualValues(3, height)
// delete tip block one by one, verify address/action after each deletion
for i := range daoTests {
if i == 0 {
// tests[0] is the whole address/action data at block height 3
continue
}
prevTipHeight, err := dao.GetTipHeight()
require.NoError(err)
prevTipHash, err := dao.GetBlockHash(prevTipHeight)
require.NoError(err)
require.NoError(dao.DeleteBlockToTarget(prevTipHeight - 1))
tipHeight, err := indexer.GetBlockchainHeight()
require.NoError(err)
require.EqualValues(prevTipHeight-1, tipHeight)
tipHeight, err = dao.GetTipHeight()
require.NoError(err)
require.EqualValues(prevTipHeight-1, tipHeight)
h, err := indexer.GetBlockHash(tipHeight)
require.NoError(err)
h1, err := dao.GetTipHash()
require.NoError(err)
require.Equal(h, h1)
_, err = dao.GetBlockHash(prevTipHeight)
require.Error(err)
_, err = dao.GetBlockHeight(prevTipHash)
require.Error(err)
if i <= 2 {
require.Equal(blks[2-i].HashBlock(), h)
} else {
require.Equal(hash.ZeroHash256, h)
}
total, err := indexer.GetTotalActions()
require.NoError(err)
require.EqualValues(daoTests[i].total, total)
if total > 0 {
_, err = indexer.GetActionHashFromIndex(1, total)
require.Equal(db.ErrInvalid, errors.Cause(err))
actions, err := indexer.GetActionHashFromIndex(0, total)
require.NoError(err)
require.Equal(actions, daoTests[i].hashTotal)
}
for j := range daoTests[i].actions {
actionCount, err := indexer.GetActionCountByAddress(daoTests[i].actions[j].addr)
require.NoError(err)
require.EqualValues(len(daoTests[i].actions[j].hashes), actionCount)
if actionCount > 0 {
actions, err := indexer.GetActionsByAddress(daoTests[i].actions[j].addr, 0, actionCount)
require.NoError(err)
require.Equal(actions, daoTests[i].actions[j].hashes)
}
}
}
}
t.Run("In-memory KV Store for blocks", func(t *testing.T) {
indexer, err := blockindex.NewIndexer(db.NewMemKVStore(), hash.ZeroHash256)
require.NoError(t, err)
testBlockDao(db.NewMemKVStore(), indexer, t)
})
path := "test-kv-store"
testFile, _ := ioutil.TempFile(os.TempDir(), path)
testPath := testFile.Name()
indexFile, _ := ioutil.TempFile(os.TempDir(), path)
indexPath := indexFile.Name()
cfg := config.Default.DB
t.Run("Bolt DB for blocks", func(t *testing.T) {
testutil.CleanupPath(t, testPath)
testutil.CleanupPath(t, indexPath)
defer func() {
testutil.CleanupPath(t, testPath)
testutil.CleanupPath(t, indexPath)
}()
cfg.DbPath = indexPath
indexer, err := blockindex.NewIndexer(db.NewBoltDB(cfg), hash.ZeroHash256)
require.NoError(t, err)
cfg.DbPath = testPath
testBlockDao(db.NewBoltDB(cfg), indexer, t)
})
t.Run("In-memory KV Store deletions", func(t *testing.T) {
indexer, err := blockindex.NewIndexer(db.NewMemKVStore(), hash.ZeroHash256)
require.NoError(t, err)
testDeleteDao(db.NewMemKVStore(), indexer, t)
})
t.Run("Bolt DB deletions", func(t *testing.T) {
testutil.CleanupPath(t, testPath)
testutil.CleanupPath(t, indexPath)
defer func() {
testutil.CleanupPath(t, testPath)
testutil.CleanupPath(t, indexPath)
}()
cfg.DbPath = indexPath
indexer, err := blockindex.NewIndexer(db.NewBoltDB(cfg), hash.ZeroHash256)
require.NoError(t, err)
cfg.DbPath = testPath
testDeleteDao(db.NewBoltDB(cfg), indexer, t)
})
}
func BenchmarkBlockCache(b *testing.B) {
test := func(cacheSize int, b *testing.B) {
b.StopTimer()
path := "test-kv-store"
testFile, _ := ioutil.TempFile(os.TempDir(), path)
testPath := testFile.Name()
indexFile, _ := ioutil.TempFile(os.TempDir(), path)
indexPath := indexFile.Name()
cfg := config.DB{
NumRetries: 1,
}
defer func() {
require.NoError(b, os.RemoveAll(testPath))
require.NoError(b, os.RemoveAll(indexPath))
}()
cfg.DbPath = indexPath
indexer, err := blockindex.NewIndexer(db.NewBoltDB(cfg), hash.ZeroHash256)
require.NoError(b, err)
cfg.DbPath = testPath
store := db.NewBoltDB(cfg)
db := config.Default.DB
db.MaxCacheSize = cacheSize
blkDao := NewBlockDAO(store, indexer, false, db)
require.NoError(b, blkDao.Start(context.Background()))
defer func() {
require.NoError(b, blkDao.Stop(context.Background()))
}()
prevHash := hash.ZeroHash256
numBlks := 8640
for i := 1; i <= numBlks; i++ {
actions := make([]action.SealedEnvelope, 10)
for j := 0; j < 10; j++ {
actions[j], err = testutil.SignedTransfer(
identityset.Address(j).String(),
identityset.PrivateKey(j+1),
1,
unit.ConvertIotxToRau(1),
nil,
testutil.TestGasLimit,
testutil.TestGasPrice,
)
require.NoError(b, err)
}
tb := block.TestingBuilder{}
blk, err := tb.SetPrevBlockHash(prevHash).
SetVersion(1).
SetTimeStamp(time.Now()).
SetHeight(uint64(i)).
AddActions(actions...).
SignAndBuild(identityset.PrivateKey(0))
require.NoError(b, err)
err = blkDao.PutBlock(&blk)
require.NoError(b, err)
prevHash = blk.HashBlock()
}
b.ResetTimer()
b.StartTimer()
for n := 0; n < b.N; n++ {
hash, _ := indexer.GetBlockHash(uint64(rand.Intn(numBlks) + 1))
_, _ = blkDao.GetBlock(hash)
}
b.StopTimer()
}
b.Run("cache", func(b *testing.B) {
test(8640, b)
})
b.Run("no-cache", func(b *testing.B) {
test(0, b)
})
}
| 1 | 20,013 | Error return value of `dao.Stop` is not checked (from `errcheck`) | iotexproject-iotex-core | go |
@@ -172,7 +172,7 @@ func handleMainConfigArgs(cmd *cobra.Command, args []string, app *ddevapp.DdevAp
if docrootRelPath != "" {
app.Docroot = docrootRelPath
if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) {
- util.Failed("The docroot provided (%v) does not exist", docrootRelPath)
+ output.UserOut.Warnf("Warning: the provided docroot at %s does not currently exist.", docrootRelPath)
}
} else if !cmd.Flags().Changed("docroot") {
app.Docroot = ddevapp.DiscoverDefaultDocroot(app) | 1 | package cmd
import (
"fmt"
"os"
"strings"
"path/filepath"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"github.com/spf13/cobra"
)
// docrootRelPath is the relative path to the docroot where index.php is
var docrootRelPath string
// siteName is the name of the site
var siteName string
// appType is the ddev app type, like drupal7/drupal8/wordpress
var appType string
// showConfigLocation if set causes the command to show the config location
var showConfigLocation bool
// extraFlagsHandlingFunc does specific handling for additional flags, and is different per provider.
var extraFlagsHandlingFunc func(cmd *cobra.Command, args []string, app *ddevapp.DdevApp) error
var providerName = ddevapp.DefaultProviderName
// ConfigCommand represents the `ddev config` command
var ConfigCommand *cobra.Command = &cobra.Command{
Use: "config [provider]",
Short: "Create or modify a ddev project configuration in the current directory",
Example: `"ddev config" or "ddev config --docroot=. --projectname=d7-kickstart --projecttype=drupal7"`,
Args: cobra.ExactArgs(0),
Run: handleConfigRun,
}
// handleConfigRun handles all the flag processing for any provider
func handleConfigRun(cmd *cobra.Command, args []string) {
app, err := getConfigApp(providerName)
if err != nil {
util.Failed(err.Error())
}
if cmd.Flags().NFlag() == 0 {
err = app.PromptForConfig()
if err != nil {
util.Failed("There was a problem configuring your project: %v", err)
}
} else {
err = handleMainConfigArgs(cmd, args, app)
if err != nil {
util.Failed(err.Error())
}
if extraFlagsHandlingFunc != nil {
err = extraFlagsHandlingFunc(cmd, args, app)
if err != nil {
util.Failed("failed to handle per-provider extra flags: %v", err)
}
}
}
provider, err := app.GetProvider()
if err != nil {
util.Failed("Failed to get provider: %v", err)
}
err = provider.Validate()
if err != nil {
util.Failed("Failed to validate project name %v: %v", app.Name, err)
}
err = app.WriteConfig()
if err != nil {
util.Failed("Failed to write config: %v", err)
}
_, err = app.CreateSettingsFile()
if err != nil {
util.Warning("Could not write settings file: %v", err)
}
err = provider.Write(app.GetConfigPath("import.yaml"))
if err != nil {
util.Failed("Failed to write provider config: %v", err)
}
util.Success("Configuration complete. You may now run 'ddev start'.")
}
func init() {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
apptypeUsage := fmt.Sprintf("Provide the project type (one of %s). This is autodetected and this flag is necessary only to override the detection.", validAppTypes)
projectNameUsage := fmt.Sprintf("Provide the name of the project to configure (normally the same as the last part of the directory name)")
ConfigCommand.Flags().StringVarP(&siteName, "projectname", "", "", projectNameUsage)
ConfigCommand.Flags().StringVarP(&docrootRelPath, "docroot", "", "", "Provide the relative docroot of the project, like 'docroot' or 'htdocs' or 'web', defaults to empty, the current directory")
ConfigCommand.Flags().StringVarP(&appType, "projecttype", "", "", apptypeUsage)
// apptype flag is there for backwards compatibility.
ConfigCommand.Flags().StringVarP(&appType, "apptype", "", "", apptypeUsage+" This is the same as --projecttype and is included only for backwards compatibility.")
ConfigCommand.Flags().BoolVarP(&showConfigLocation, "show-config-location", "", false, "Output the location of the config.yaml file if it exists, or error that it doesn't exist.")
ConfigCommand.Flags().StringVarP(&siteName, "sitename", "", "", projectNameUsage+" This is the same as projectname and is included only for backwards compatibility")
err := ConfigCommand.Flags().MarkDeprecated("sitename", "The sitename flag is deprecated in favor of --projectname")
util.CheckErr(err)
err = ConfigCommand.Flags().MarkDeprecated("apptype", "The apptype flag is deprecated in favor of --projecttype")
util.CheckErr(err)
RootCmd.AddCommand(ConfigCommand)
}
// getConfigApp() does the basic setup of the app (with provider) and returns it.
func getConfigApp(providerName string) (*ddevapp.DdevApp, error) {
appRoot, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("could not determine current working directory: %v", err)
}
// TODO: Handle case where config may be in parent directories.
app, err := ddevapp.NewApp(appRoot, providerName)
if err != nil {
return nil, fmt.Errorf("could not create new config: %v", err)
}
return app, nil
}
// handleMainConfigArgs() validates and processes the main config args (docroot, etc.)
func handleMainConfigArgs(cmd *cobra.Command, args []string, app *ddevapp.DdevApp) error {
var err error
// Support the show-config-location flag.
if showConfigLocation {
// nolint: vetshadow
activeApp, err := ddevapp.GetActiveApp("")
if err != nil {
if strings.Contains(err.Error(), "Have you run 'ddev config'") {
util.Failed("No project configuration currently exists")
} else {
util.Failed("Failed to access project configuration: %v", err)
}
}
if activeApp.ConfigPath != "" && activeApp.ConfigExists() {
rawResult := make(map[string]interface{})
rawResult["configpath"] = activeApp.ConfigPath
rawResult["approot"] = activeApp.AppRoot
friendlyMsg := fmt.Sprintf("The project config location is %s", activeApp.ConfigPath)
output.UserOut.WithField("raw", rawResult).Print(friendlyMsg)
return nil
}
}
// Let them know if we're replacing the config.yaml
app.WarnIfConfigReplace()
// app.Name gets set to basename if not provided, or set to siteName if provided
if app.Name != "" && siteName == "" { // If we already have a c.Name and no siteName, leave c.Name alone
// Sorry this is empty but it makes the logic clearer.
} else if siteName != "" { // if we have a siteName passed in, use it for c.Name
app.Name = siteName
} else { // No siteName passed, c.Name not set: use c.Name from the directory
// nolint: vetshadow
pwd, err := os.Getwd()
util.CheckErr(err)
app.Name = filepath.Base(pwd)
}
// docrootRelPath must exist
if docrootRelPath != "" {
app.Docroot = docrootRelPath
if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) {
util.Failed("The docroot provided (%v) does not exist", docrootRelPath)
}
} else if !cmd.Flags().Changed("docroot") {
app.Docroot = ddevapp.DiscoverDefaultDocroot(app)
}
if appType != "" && !ddevapp.IsValidAppType(appType) {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
util.Failed("apptype must be one of %s", validAppTypes)
}
detectedApptype := app.DetectAppType()
fullPath, pathErr := filepath.Abs(app.Docroot)
if pathErr != nil {
util.Failed("Failed to get absolute path to Docroot %s: %v", app.Docroot, pathErr)
}
if appType == "" || appType == detectedApptype { // Found an app, matches passed-in or no apptype passed
appType = detectedApptype
util.Success("Found a %s codebase at %s", detectedApptype, fullPath)
} else if appType != "" { // apptype was passed, but we found no app at all
util.Warning("You have specified a project type of %s but no project of that type is found in %s", appType, fullPath)
} else if appType != "" && detectedApptype != appType { // apptype was passed, app was found, but not the same type
util.Warning("You have specified a project type of %s but a project of type %s was discovered in %s", appType, detectedApptype, fullPath)
}
app.Type = appType
err = app.ConfigFileOverrideAction()
if err != nil {
util.Failed("failed to run ConfigFileOverrideAction: %v", err)
}
err = app.WriteConfig()
if err != nil {
return fmt.Errorf("could not write ddev config file %s: %v", app.ConfigPath, err)
}
return nil
}
| 1 | 13,056 | util.Warning()? Easier to say. | drud-ddev | go |
@@ -38,6 +38,14 @@ func Trie(tx ethdb.Tx, slowChecks bool, quit <-chan struct{}) {
if errc != nil {
panic(errc)
}
+ select {
+ default:
+ case <-quit:
+ return
+ case <-logEvery.C:
+ log.Info("trie account integrity", "key", fmt.Sprintf("%x", k))
+ }
+
hasState, hasBranch, hasHash, hashes, _ := trie.UnmarshalTrieNode(v)
AssertSubset(k, hasBranch, hasState)
AssertSubset(k, hasHash, hasState) | 1 | package integrity
import (
"bytes"
"encoding/binary"
"fmt"
"math/bits"
"time"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/hexutil"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/turbo/trie"
)
// AssertSubset a & b == a - checks whether a is subset of b
func AssertSubset(prefix []byte, a, b uint16) {
if (a & b) != a {
panic(fmt.Errorf("invariant 'is subset' failed: %x, %b, %b", prefix, a, b))
}
}
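// Trie walks TrieOfAccountsBucket and TrieOfStorageBucket and panics when a structural
// invariant is violated: hasBranch and hasHash must be subsets of hasState, every node
// (except the shortest prefixes) must have a parent with the matching branch bit set,
// every branch bit must have a child present in the bucket, and (with slowChecks) every
// state bit must be covered by at least one entry in the hashed state buckets.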
func Trie(tx ethdb.Tx, slowChecks bool, quit <-chan struct{}) {
logEvery := time.NewTicker(10 * time.Second)
defer logEvery.Stop()
seek := make([]byte, 256)
buf := make([]byte, 256)
buf2 := make([]byte, 256)
{
c, trieAcc2, accC := tx.Cursor(dbutils.TrieOfAccountsBucket), tx.Cursor(dbutils.TrieOfAccountsBucket), tx.Cursor(dbutils.HashedAccountsBucket)
defer c.Close()
defer trieAcc2.Close()
defer accC.Close()
for k, v, errc := c.First(); k != nil; k, v, errc = c.Next() {
if errc != nil {
panic(errc)
}
hasState, hasBranch, hasHash, hashes, _ := trie.UnmarshalTrieNode(v)
AssertSubset(k, hasBranch, hasState)
AssertSubset(k, hasHash, hasState)
if bits.OnesCount16(hasHash) != len(hashes)/common.HashLength {
panic(fmt.Errorf("invariant bits.OnesCount16(hasHash) == len(hashes) failed: %d, %d", bits.OnesCount16(hasHash), len(v[6:])/common.HashLength))
}
found := false
var parentK []byte
// must have parent with right hasBranch bit
for i := len(k) - 1; i > 0 && !found; i-- {
parentK = k[:i]
kParent, vParent, err := trieAcc2.SeekExact(parentK)
if err != nil {
panic(err)
}
if kParent == nil {
continue
}
found = true
parentHasBranch := binary.BigEndian.Uint16(vParent[2:])
parentHasBit := 1<<uint16(k[len(parentK)])&parentHasBranch != 0
if !parentHasBit {
panic(fmt.Errorf("for %x found parent %x, but it has no branchBit: %016b", k, parentK, parentHasBranch))
}
}
if !found && len(k) > 1 {
panic(fmt.Errorf("trie hash %x has no parent", k))
}
// must have all children
seek = seek[:len(k)+1]
copy(seek, k)
for i := uint16(0); i < 16; i++ {
if 1<<i&hasBranch == 0 {
continue
}
seek[len(seek)-1] = uint8(i)
k2, _, err := trieAcc2.Seek(seek)
if err != nil {
panic(err)
}
if k2 == nil {
panic(fmt.Errorf("key %x has branches %016b, but there is no child %d in db; last seen key: %x->nil", k, hasBranch, i, seek))
}
if !bytes.HasPrefix(k2, seek) {
panic(fmt.Errorf("key %x has branches %016b, but there is no child %d in db; last seen key: %x->%x", k, hasBranch, i, seek, k2))
}
}
if !slowChecks {
continue
}
// each AccTrie must cover some state
buf = buf[:len(k)+1]
copy(buf, k)
for i := uint16(0); i < 16; i++ {
if 1<<i&hasState == 0 {
continue
}
found := false
buf = buf[:len(k)+1]
buf[len(buf)-1] = uint8(i)
bitsToMatch := len(buf) * 4
if len(buf)%2 == 1 {
buf = append(buf, 0)
}
hexutil.CompressNibbles(buf, &seek)
if err := ethdb.Walk(accC, seek, bitsToMatch, func(k, v []byte) (bool, error) {
found = true
return false, nil
}); err != nil {
panic(err)
}
if !found {
panic(fmt.Errorf("key %x has state %016b, but there is no child %d,%x in state", k, hasState, i, seek))
}
}
select {
default:
case <-quit:
return
case <-logEvery.C:
log.Info("trie account integrity", "key", fmt.Sprintf("%x", k))
}
}
}
{
c, trieStorage, storageC := tx.Cursor(dbutils.TrieOfStorageBucket), tx.Cursor(dbutils.TrieOfStorageBucket), tx.Cursor(dbutils.HashedStorageBucket)
defer c.Close()
defer trieStorage.Close()
defer storageC.Close()
for k, v, errc := c.First(); k != nil; k, v, errc = c.Next() {
if errc != nil {
panic(errc)
}
hasState, hasBranch, hasHash, hashes, _ := trie.UnmarshalTrieNode(v)
AssertSubset(k, hasBranch, hasState)
AssertSubset(k, hasHash, hasState)
if bits.OnesCount16(hasHash) != len(hashes)/common.HashLength {
panic(fmt.Errorf("invariant bits.OnesCount16(hasHash) == len(hashes) failed: %d, %d", bits.OnesCount16(hasHash), len(hashes)/common.HashLength))
}
found := false
var parentK []byte
// must have parent with right hasBranch bit
for i := len(k) - 1; i >= 40 && !found; i-- {
parentK = k[:i]
kParent, vParent, err := trieStorage.SeekExact(parentK)
if err != nil {
panic(err)
}
if kParent == nil {
continue
}
found = true
parentBranches := binary.BigEndian.Uint16(vParent[2:])
parentHasBit := 1<<uint16(k[len(parentK)])&parentBranches != 0
if !parentHasBit {
panic(fmt.Errorf("for %x found parent %x, but it has no branchBit for child: %016b", k, parentK, parentBranches))
}
}
if !found && len(k) > 40 {
panic(fmt.Errorf("trie hash %x has no parent. Last checked: %x", k, parentK))
}
// must have all children
seek = seek[:len(k)+1]
copy(seek, k)
for i := uint16(0); i < 16; i++ {
if 1<<i&hasBranch == 0 {
continue
}
seek[len(seek)-1] = uint8(i)
k2, _, err := trieStorage.Seek(seek)
if err != nil {
panic(err)
}
if !bytes.HasPrefix(k2, seek) {
panic(fmt.Errorf("key %x has branches %016b, but there is no child %d in db", k, hasBranch, i))
}
}
if !slowChecks {
continue
}
// each storage trie entry must cover some state
buf = buf[:len(k)-40+1]
copy(buf, k[40:])
for i := uint16(0); i < 16; i++ {
if 1<<i&hasState == 0 {
continue
}
found := false
buf = buf[:len(k)-40+1]
buf[len(buf)-1] = uint8(i)
bitsToMatch := 40*8 + len(buf)*4
if len(buf)%2 == 1 {
buf = append(buf, 0)
}
hexutil.CompressNibbles(buf, &buf2)
seek = seek[:40+len(buf2)]
copy(seek, k[:40])
copy(seek[40:], buf2)
if err := ethdb.Walk(storageC, seek, bitsToMatch, func(k, v []byte) (bool, error) {
found = true
return false, nil
}); err != nil {
panic(err)
}
if !found {
panic(fmt.Errorf("key %x has state %016b, but there is no child %d,%x in state", k, hasState, i, seek))
}
}
select {
default:
case <-quit:
return
case <-logEvery.C:
log.Info("trie storage integrity", "key", fmt.Sprintf("%x", k))
}
}
}
}
| 1 | 22,022 | this default is kinda superfluous (although i see it was already there before PR ) | ledgerwatch-erigon | go |
@@ -53,11 +53,7 @@ public class NodeList<N extends Node> implements List<N>, Iterable<N>, HasParent
private List<AstObserver> observers = new ArrayList<>();
public NodeList() {
- this((Node) null);
- }
-
- public NodeList(Node parent) {
- setParentNode(parent);
+ parentNode = null;
}
public NodeList(Collection<N> n) { | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast;
import com.github.javaparser.HasParentNode;
import com.github.javaparser.ast.observer.AstObserver;
import com.github.javaparser.ast.observer.Observable;
import com.github.javaparser.ast.visitor.GenericVisitor;
import com.github.javaparser.ast.visitor.Visitable;
import com.github.javaparser.ast.visitor.VoidVisitor;
import com.github.javaparser.metamodel.InternalProperty;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* A list of nodes.
* It usually has a parent node.
* Unlike normal Nodes, this does not mean that it is a child of that parent.
* Instead, this list will make every node it contains a child of its parent.
* This way, a NodeList does not create an extra level inside the AST.
*
* @param <N> the type of nodes contained.
*/
public class NodeList<N extends Node> implements List<N>, Iterable<N>, HasParentNode<NodeList<N>>, Visitable, Observable {
@InternalProperty
private List<N> innerList = new ArrayList<>(0);
private Node parentNode;
private List<AstObserver> observers = new ArrayList<>();
public NodeList() {
this((Node) null);
}
public NodeList(Node parent) {
setParentNode(parent);
}
public NodeList(Collection<N> n) {
this.addAll(n);
}
public NodeList(N... n) {
this.addAll(Arrays.asList(n));
}
@Override
public boolean add(N node) {
notifyElementAdded(innerList.size(), node);
own(node);
return innerList.add(node);
}
private void own(N node) {
if (node == null) {
return;
}
setAsParentNodeOf(node);
}
public boolean remove(Node node) {
int index = innerList.indexOf(node);
if (index != -1) {
notifyElementRemoved(index, node);
node.setParentNode(null);
}
return innerList.remove(node);
}
@SafeVarargs
public static <X extends Node> NodeList<X> nodeList(X... nodes) {
final NodeList<X> nodeList = new NodeList<>();
Collections.addAll(nodeList, nodes);
return nodeList;
}
public static <X extends Node> NodeList<X> nodeList(Collection<X> nodes) {
final NodeList<X> nodeList = new NodeList<>();
nodeList.addAll(nodes);
return nodeList;
}
public static <X extends Node> NodeList<X> nodeList(NodeList<X> nodes) {
final NodeList<X> nodeList = new NodeList<>();
nodeList.addAll(nodes);
return nodeList;
}
public boolean contains(N node) {
return innerList.contains(node);
}
@Override
public int size() {
return innerList.size();
}
@Override
public N get(int i) {
return innerList.get(i);
}
@Override
public Iterator<N> iterator() {
// TODO take care of "Iterator.remove"
return innerList.iterator();
}
@Override
public N set(int index, N element) {
if (index < 0 || index >= innerList.size()) {
throw new IllegalArgumentException("Illegal index. The index should be between 0 and " + innerList.size()
+ " excluded. It is instead " + index);
}
if (element == innerList.get(index)) {
return element;
}
notifyElementReplaced(index, element);
innerList.get(index).setParentNode(null);
setAsParentNodeOf(element);
return innerList.set(index, element);
}
@Override
public N remove(int index) {
notifyElementRemoved(index, innerList.get(index));
N remove = innerList.remove(index);
if (remove != null)
remove.setParentNode(null);
return remove;
}
@Override
public boolean isEmpty() {
return innerList.isEmpty();
}
@Override
public void sort(Comparator<? super N> comparator) {
innerList.sort(comparator);
}
public void addAll(NodeList<N> otherList) {
for (N node : otherList) {
add(node);
}
}
@Override
public void add(int index, N node) {
notifyElementAdded(index, node);
own(node);
innerList.add(index, node);
}
/**
* Inserts the node before all other nodes.
*/
public NodeList<N> addFirst(N node) {
add(0, node);
return this;
}
/**
* Inserts the node after all other nodes. (This is simply an alias for add.)
*/
public NodeList<N> addLast(N node) {
add(node);
return this;
}
/**
* Inserts the node after afterThisNode.
*
* @throws IllegalArgumentException when afterThisNode is not in this list.
*/
public NodeList<N> addAfter(N node, N afterThisNode) {
int i = indexOf(afterThisNode);
if (i == -1) {
throw new IllegalArgumentException("Can't find node to insert after.");
}
add(i + 1, node);
return this;
}
/**
* Inserts the node before beforeThisNode.
*
* @throws IllegalArgumentException when beforeThisNode is not in this list.
*/
public NodeList<N> addBefore(N node, N beforeThisNode) {
int i = indexOf(beforeThisNode);
if (i == -1) {
throw new IllegalArgumentException("Can't find node to insert before.");
}
add(i, node);
return this;
}
@Override
public Optional<Node> getParentNode() {
return Optional.ofNullable(parentNode);
}
/**
* Sets the parentNode
*
* @param parentNode the parentNode
* @return this, the NodeList
*/
@Override
public NodeList<N> setParentNode(Node parentNode) {
this.parentNode = parentNode;
setAsParentNodeOf(innerList);
return this;
}
@Override
public Node getParentNodeForChildren() {
return parentNode;
}
@Override
public <R, A> R accept(final GenericVisitor<R, A> v, final A arg) {
return v.visit(this, arg);
}
@Override
public <A> void accept(final VoidVisitor<A> v, final A arg) {
v.visit(this, arg);
}
/**
* @see java.lang.Iterable#forEach(java.util.function.Consumer)
*/
@Override
public void forEach(Consumer<? super N> action) {
innerList.forEach(action);
}
/**
* @see java.util.List#contains(java.lang.Object)
*/
@Override
public boolean contains(Object o) {
return innerList.contains(o);
}
/**
* @see java.util.List#toArray()
*/
@Override
public Object[] toArray() {
return innerList.toArray();
}
/**
* @see java.util.List#toArray(java.lang.Object[])
*/
@Override
public <T> T[] toArray(T[] a) {
return innerList.toArray(a);
}
/**
* @see java.util.List#remove(java.lang.Object)
*/
@Override
public boolean remove(Object o) {
if (o instanceof Node) {
return remove((Node) o);
} else {
return false;
}
}
/**
* @see java.util.List#containsAll(java.util.Collection)
*/
@Override
public boolean containsAll(Collection<?> c) {
return innerList.containsAll(c);
}
/**
* @see java.util.List#addAll(java.util.Collection)
*/
@Override
public boolean addAll(Collection<? extends N> c) {
c.forEach(this::add);
return !c.isEmpty();
}
/**
* @see java.util.List#addAll(int, java.util.Collection)
*/
@Override
public boolean addAll(int index, Collection<? extends N> c) {
for (N e : c) {
add(index++, e);
}
return !c.isEmpty();
}
/**
* @see java.util.List#removeAll(java.util.Collection)
*/
@Override
public boolean removeAll(Collection<?> c) {
boolean changed = false;
for (Object e : c) {
changed = remove(e) || changed;
}
return changed;
}
/**
* @see java.util.List#retainAll(java.util.Collection)
*/
@Override
public boolean retainAll(Collection<?> c) {
boolean changed = false;
for (Object e : this.stream().filter(it -> !c.contains(it)).toArray()) {
if (!c.contains(e)) {
changed = remove(e) || changed;
}
}
return changed;
}
/**
* @see java.util.List#replaceAll(java.util.function.UnaryOperator)
*/
@Override
public void replaceAll(UnaryOperator<N> operator) {
for (int i = 0; i < this.size(); i++) {
set(i, operator.apply(this.get(i)));
}
}
/**
* @see java.util.Collection#removeIf(java.util.function.Predicate)
*/
@Override
public boolean removeIf(Predicate<? super N> filter) {
boolean changed = false;
for (Object e : this.stream().filter(filter).toArray()) {
changed = remove(e) || changed;
}
return changed;
}
/**
* @see java.util.List#clear()
*/
@Override
public void clear() {
while (!isEmpty()) {
remove(0);
}
}
/**
* @see java.util.List#equals(java.lang.Object)
*/
@Override
public boolean equals(Object o) {
return innerList.equals(o);
}
/**
* @see java.util.List#hashCode()
*/
@Override
public int hashCode() {
return innerList.hashCode();
}
/**
* @see java.util.List#indexOf(java.lang.Object)
*/
@Override
public int indexOf(Object o) {
return innerList.indexOf(o);
}
/**
* @see java.util.List#lastIndexOf(java.lang.Object)
*/
@Override
public int lastIndexOf(Object o) {
return innerList.lastIndexOf(o);
}
/**
* @see java.util.List#listIterator()
*/
@Override
public ListIterator<N> listIterator() {
return innerList.listIterator();
}
/**
* @see java.util.List#listIterator(int)
*/
@Override
public ListIterator<N> listIterator(int index) {
return innerList.listIterator(index);
}
/**
* @see java.util.Collection#parallelStream()
*/
@Override
public Stream<N> parallelStream() {
return innerList.parallelStream();
}
/**
* @see java.util.List#subList(int, int)
*/
@Override
public List<N> subList(int fromIndex, int toIndex) {
return innerList.subList(fromIndex, toIndex);
}
/**
* @see java.util.List#spliterator()
*/
@Override
public Spliterator<N> spliterator() {
return innerList.spliterator();
}
private void notifyElementAdded(int index, Node nodeAddedOrRemoved) {
this.observers.forEach(o -> o.listChange(this, AstObserver.ListChangeType.ADDITION, index, nodeAddedOrRemoved));
}
private void notifyElementRemoved(int index, Node nodeAddedOrRemoved) {
this.observers.forEach(o -> o.listChange(this, AstObserver.ListChangeType.REMOVAL, index, nodeAddedOrRemoved));
}
private void notifyElementReplaced(int index, Node nodeAddedOrRemoved) {
this.observers.forEach(o -> o.listReplacement(this, index, this.get(index), nodeAddedOrRemoved));
}
@Override
public void unregister(AstObserver observer) {
this.observers.remove(observer);
}
@Override
public void register(AstObserver observer) {
this.observers.add(observer);
}
@Override
public boolean isRegistered(AstObserver observer) {
return this.observers.contains(observer);
}
/**
* Replaces the first node that is equal to "old" with "replacement".
*
* @return true if a replacement has happened.
*/
public boolean replace(N old, N replacement) {
int i = indexOf(old);
if (i == -1) {
return false;
}
set(i, replacement);
return true;
}
/**
* @return the opposite of isEmpty()
*/
public boolean isNonEmpty() {
return !isEmpty();
}
public void ifNonEmpty(Consumer<? super NodeList<N>> consumer) {
if (isNonEmpty())
consumer.accept(this);
}
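/**
 * A collector that accumulates a stream of nodes into a NodeList,
 * for use as {@code nodes.stream().collect(NodeList.toNodeList())}.
 */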
public static <T extends Node> Collector<T, NodeList<T>, NodeList<T>> toNodeList() {
return Collector.of(NodeList::new, NodeList::add, (left, right) -> {
left.addAll(right);
return left;
});
}
private void setAsParentNodeOf(List<? extends Node> childNodes) {
if (childNodes != null) {
for (HasParentNode current : childNodes) {
current.setParentNode(getParentNodeForChildren());
}
}
}
private void setAsParentNodeOf(Node childNode) {
if (childNode != null) {
childNode.setParentNode(getParentNodeForChildren());
}
}
@Override
public String toString() {
return innerList.stream().map(Node::toString).collect(Collectors.joining(", ", "[", "]"));
}
}
| 1 | 11,938 | How is this related? | javaparser-javaparser | java |
@@ -3,6 +3,7 @@ class SubscriptionsController < ApplicationController
def new
@plans = IndividualPlan.featured.active.ordered
+ @team_plans = TeamPlan.featured.ordered
end
def edit | 1 | class SubscriptionsController < ApplicationController
before_filter :assign_mentor, only: [:new, :edit]
def new
@plans = IndividualPlan.featured.active.ordered
end
def edit
@plans = IndividualPlan.featured.active.ordered
end
def update
plan = IndividualPlan.find_by_sku!(params[:plan_id])
current_user.subscription.change_plan(plan)
redirect_to my_account_path, notice: I18n.t('subscriptions.flashes.change.success')
end
private
def assign_mentor
@mentor = User.find_or_sample_mentor(cookies[:mentor_id])
if cookies[:mentor_id].blank?
cookies[:mentor_id] ||= @mentor.id
end
end
end
| 1 | 8,735 | I'm breaking one of the rules here, it feels like the right thing to do. Open to alternative suggestions. | thoughtbot-upcase | rb |
@@ -1,8 +1,8 @@
shared_examples_for 'disables OpenSSH roaming' do
let(:disable_ssh_roaming) { %(echo -e "Host *\n UseRoaming no\n" | cat - $HOME/.ssh/config > $HOME/.ssh/config.tmp && mv $HOME/.ssh/config.tmp $HOME/.ssh/config) }
- let(:sexp) { sexp_find(subject, [:if, "$(sw_vers -productVersion | cut -d . -f 2) -lt 12"]) }
+ let(:sexp) { sexp_find(subject, [:if, %("$(sw_vers -productVersion 2>/dev/null | cut -d . -f 2)" -lt 12)]) }
it 'disables OpenSSH roaming' do
- sexp.should include_sexp [:cmd, disable_ssh_roaming]
+ expect(sexp).to include_sexp [:cmd, disable_ssh_roaming]
end
end | 1 | shared_examples_for 'disables OpenSSH roaming' do
let(:disable_ssh_roaming) { %(echo -e "Host *\n UseRoaming no\n" | cat - $HOME/.ssh/config > $HOME/.ssh/config.tmp && mv $HOME/.ssh/config.tmp $HOME/.ssh/config) }
let(:sexp) { sexp_find(subject, [:if, "$(sw_vers -productVersion | cut -d . -f 2) -lt 12"]) }
it 'disables OpenSSH roaming' do
sexp.should include_sexp [:cmd, disable_ssh_roaming]
end
end
| 1 | 14,576 | The use of `#should` was triggering an rspec depracation warning for me, which is why I switched this to the rspec 3 style. | travis-ci-travis-build | rb |
@@ -172,7 +172,7 @@ bool ReaderProxy::requested_changes_set(std::vector<SequenceNumber_t>& seqNumSet
{
auto chit = m_changesForReader.find(ChangeForReader_t(*sit));
- if(chit != m_changesForReader.end() && chit->isValid())
+ if(chit != m_changesForReader.end())
{
ChangeForReader_t newch(*chit);
newch.setStatus(REQUESTED); | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file ReaderProxy.cpp
*
*/
#include <fastrtps/rtps/writer/ReaderProxy.h>
#include <fastrtps/rtps/writer/StatefulWriter.h>
#include <fastrtps/utils/TimeConversion.h>
#include <fastrtps/rtps/writer/timedevent/NackResponseDelay.h>
#include <fastrtps/rtps/writer/timedevent/NackSupressionDuration.h>
#include <fastrtps/rtps/writer/timedevent/InitialHeartbeat.h>
#include <fastrtps/log/Log.h>
#include <fastrtps/rtps/resources/AsyncWriterThread.h>
#include <fastrtps/rtps/history/WriterHistory.h>
#include <mutex>
#include <cassert>
using namespace eprosima::fastrtps::rtps;
ReaderProxy::ReaderProxy(const RemoteReaderAttributes& rdata,const WriterTimes& times,StatefulWriter* SW) :
m_att(rdata), mp_SFW(SW),
mp_nackResponse(nullptr), mp_nackSupression(nullptr), mp_initialHeartbeat(nullptr), m_lastAcknackCount(0),
mp_mutex(new std::recursive_mutex()), lastNackfragCount_(0)
{
if(rdata.endpoint.reliabilityKind == RELIABLE)
{
mp_nackResponse = new NackResponseDelay(this,TimeConv::Time_t2MilliSecondsDouble(times.nackResponseDelay));
mp_nackSupression = new NackSupressionDuration(this,TimeConv::Time_t2MilliSecondsDouble(times.nackSupressionDuration));
mp_initialHeartbeat = new InitialHeartbeat(this, TimeConv::Time_t2MilliSecondsDouble(times.initialHeartbeatDelay));
}
logInfo(RTPS_WRITER,"Reader Proxy created");
}
ReaderProxy::~ReaderProxy()
{
destroy_timers();
delete(mp_mutex);
}
void ReaderProxy::destroy_timers()
{
if(mp_nackResponse != nullptr)
{
delete(mp_nackResponse);
mp_nackResponse = nullptr;
}
if(mp_nackSupression != nullptr)
{
delete(mp_nackSupression);
mp_nackSupression = nullptr;
}
if(mp_initialHeartbeat != nullptr)
{
delete(mp_initialHeartbeat);
mp_initialHeartbeat = nullptr;
}
}
void ReaderProxy::addChange(const ChangeForReader_t& change)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
assert(change.getSequenceNumber() > changesFromRLowMark_);
assert(m_changesForReader.rbegin() != m_changesForReader.rend() ?
change.getSequenceNumber() > m_changesForReader.rbegin()->getSequenceNumber() :
true);
if(m_changesForReader.size() == 0 && change.getStatus() == ACKNOWLEDGED)
{
changesFromRLowMark_ = change.getSequenceNumber();
return;
}
m_changesForReader.insert(change);
//TODO (Ricardo) Remove this functionality from here. It does not belong here.
if (change.getStatus() == UNSENT)
AsyncWriterThread::wakeUp(mp_SFW);
}
size_t ReaderProxy::countChangesForReader() const
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
return m_changesForReader.size();
}
bool ReaderProxy::change_is_acked(const SequenceNumber_t& sequence_number)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
if(sequence_number <= changesFromRLowMark_)
return true;
auto chit = m_changesForReader.find(ChangeForReader_t(sequence_number));
assert(chit != m_changesForReader.end());
return !chit->isRelevant() || chit->getStatus() == ACKNOWLEDGED;
}
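// Marks every change with sequence number strictly below seqNum as acknowledged by advancing
// changesFromRLowMark_. When seqNum is at or below the current low mark, the affected changes
// are re-inserted as UNACKNOWLEDGED instead (currently only needed by builtin StatefulWriters
// after a remote lease expires).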
void ReaderProxy::acked_changes_set(const SequenceNumber_t& seqNum)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
SequenceNumber_t future_low_mark = seqNum;
if(seqNum > changesFromRLowMark_)
{
auto chit = m_changesForReader.find(seqNum);
m_changesForReader.erase(m_changesForReader.begin(), chit);
}
else
{
// Special case. Currently only used on Builtin StatefulWriters
// after losing lease duration.
SequenceNumber_t current_sequence = seqNum;
if(seqNum < mp_SFW->get_seq_num_min())
{
current_sequence = mp_SFW->get_seq_num_min();
}
future_low_mark = current_sequence;
for(; current_sequence <= changesFromRLowMark_; ++current_sequence)
{
CacheChange_t* change = nullptr;
if(mp_SFW->mp_history->get_change(current_sequence, mp_SFW->getGuid(), &change))
{
ChangeForReader_t cr(change);
cr.setStatus(UNACKNOWLEDGED);
m_changesForReader.insert(cr);
}
else
{
ChangeForReader_t cr;
cr.setStatus(UNACKNOWLEDGED);
cr.notValid();
m_changesForReader.insert(cr);
}
}
}
changesFromRLowMark_ = future_low_mark - 1;
}
bool ReaderProxy::requested_changes_set(std::vector<SequenceNumber_t>& seqNumSet)
{
bool isSomeoneWasSetRequested = false;
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for(std::vector<SequenceNumber_t>::iterator sit=seqNumSet.begin();sit!=seqNumSet.end();++sit)
{
auto chit = m_changesForReader.find(ChangeForReader_t(*sit));
if(chit != m_changesForReader.end() && chit->isValid())
{
ChangeForReader_t newch(*chit);
newch.setStatus(REQUESTED);
newch.markAllFragmentsAsUnsent();
auto hint = m_changesForReader.erase(chit);
m_changesForReader.insert(hint, newch);
isSomeoneWasSetRequested = true;
}
}
if(isSomeoneWasSetRequested)
{
logInfo(RTPS_WRITER,"Requested Changes: " << seqNumSet);
}
return isSomeoneWasSetRequested;
}
//TODO(Ricardo) Temporary
//std::vector<const ChangeForReader_t*> ReaderProxy::get_unsent_changes() const
std::vector<ChangeForReader_t*> ReaderProxy::get_unsent_changes()
{
std::vector<ChangeForReader_t*> unsent_changes;
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for(auto &change_for_reader : m_changesForReader)
if(change_for_reader.getStatus() == UNSENT)
unsent_changes.push_back(const_cast<ChangeForReader_t*>(&change_for_reader));
return unsent_changes;
}
std::vector<const ChangeForReader_t*> ReaderProxy::get_requested_changes() const
{
std::vector<const ChangeForReader_t*> unsent_changes;
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
auto it = m_changesForReader.begin();
for (; it!= m_changesForReader.end(); ++it)
if(it->getStatus() == REQUESTED)
unsent_changes.push_back(&(*it));
return unsent_changes;
}
void ReaderProxy::set_change_to_status(const SequenceNumber_t& seq_num, ChangeForReaderStatus_t status)
{
if(seq_num <= changesFromRLowMark_)
return;
auto it = m_changesForReader.find(ChangeForReader_t(seq_num));
bool mustWakeUpAsyncThread = false;
if(it != m_changesForReader.end())
{
if(status == ACKNOWLEDGED && it == m_changesForReader.begin())
{
m_changesForReader.erase(it);
changesFromRLowMark_ = seq_num;
}
else
{
ChangeForReader_t newch(*it);
newch.setStatus(status);
if (status == UNSENT) mustWakeUpAsyncThread = true;
auto hint = m_changesForReader.erase(it);
m_changesForReader.insert(hint, newch);
}
}
if (mustWakeUpAsyncThread)
AsyncWriterThread::wakeUp(mp_SFW);
}
bool ReaderProxy::mark_fragment_as_sent_for_change(const CacheChange_t* change, FragmentNumber_t fragment)
{
if(change->sequenceNumber <= changesFromRLowMark_)
return false;
bool allFragmentsSent = false;
auto it = m_changesForReader.find(ChangeForReader_t(change->sequenceNumber));
bool mustWakeUpAsyncThread = false;
if(it != m_changesForReader.end())
{
ChangeForReader_t newch(*it);
newch.markFragmentsAsSent(fragment);
if (newch.getUnsentFragments().isSetEmpty())
{
newch.setStatus(UNDERWAY); //TODO (Ricardo) Check
allFragmentsSent = true;
}
else
mustWakeUpAsyncThread = true;
auto hint = m_changesForReader.erase(it);
m_changesForReader.insert(hint, newch);
}
if (mustWakeUpAsyncThread)
AsyncWriterThread::wakeUp(mp_SFW);
return allFragmentsSent;
}
void ReaderProxy::convert_status_on_all_changes(ChangeForReaderStatus_t previous, ChangeForReaderStatus_t next)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
bool mustWakeUpAsyncThread = false;
auto it = m_changesForReader.begin();
while(it != m_changesForReader.end())
{
if(it->getStatus() == previous)
{
if(next == ACKNOWLEDGED && it == m_changesForReader.begin())
{
changesFromRLowMark_ = it->getSequenceNumber();
it = m_changesForReader.erase(it);
continue;
}
else
{
ChangeForReader_t newch(*it);
newch.setStatus(next);
if (next == UNSENT && previous != UNSENT)
mustWakeUpAsyncThread = true;
auto hint = m_changesForReader.erase(it);
it = m_changesForReader.insert(hint, newch);
}
}
++it;
}
if (mustWakeUpAsyncThread)
AsyncWriterThread::wakeUp(mp_SFW);
}
//TODO(Ricardo)
//void ReaderProxy::setNotValid(const CacheChange_t* change)
void ReaderProxy::setNotValid(CacheChange_t* change)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
// Check sequence number is in the container, because it was not clean up.
if(m_changesForReader.size() == 0 || change->sequenceNumber < m_changesForReader.begin()->getSequenceNumber())
return;
auto chit = m_changesForReader.find(ChangeForReader_t(change));
// Element must be in the container. In other case, bug.
assert(chit != m_changesForReader.end());
if(chit == m_changesForReader.begin())
{
assert(chit->getStatus() != ACKNOWLEDGED);
// if it is the first element, set state to unacknowledge because from now reader has to confirm
// it will not be expecting it.
ChangeForReader_t newch(*chit);
newch.setStatus(UNACKNOWLEDGED);
newch.notValid();
auto hint = m_changesForReader.erase(chit);
m_changesForReader.insert(hint, newch);
}
else
{
// In case its state is not ACKNOWLEDGED, set it to UNACKNOWLEDGE because from now reader has to confirm
// it will not be expecting it.
ChangeForReader_t newch(*chit);
if (chit->getStatus() != ACKNOWLEDGED)
newch.setStatus(UNACKNOWLEDGED);
newch.notValid();
auto hint = m_changesForReader.erase(chit);
m_changesForReader.insert(hint, newch);
}
}
bool ReaderProxy::thereIsUnacknowledged() const
{
bool returnedValue = false;
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for(auto it = m_changesForReader.begin(); it!=m_changesForReader.end(); ++it)
{
if(it->getStatus() == UNACKNOWLEDGED)
{
returnedValue = true;
break;
}
}
return returnedValue;
}
bool change_min(const ChangeForReader_t* ch1, const ChangeForReader_t* ch2)
{
return ch1->getSequenceNumber() < ch2->getSequenceNumber();
}
bool change_min2(const ChangeForReader_t ch1, const ChangeForReader_t ch2)
{
return ch1.getSequenceNumber() < ch2.getSequenceNumber();
}
bool ReaderProxy::minChange(std::vector<ChangeForReader_t*>* Changes,
ChangeForReader_t* changeForReader)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
*changeForReader = **std::min_element(Changes->begin(),Changes->end(),change_min);
return true;
}
bool ReaderProxy::requested_fragment_set(SequenceNumber_t sequence_number, const FragmentNumberSet_t& frag_set)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
// Locate the outbound change referenced by the NACK_FRAG
auto changeIter = std::find_if(m_changesForReader.begin(), m_changesForReader.end(),
[sequence_number](const ChangeForReader_t& change)
{return change.getSequenceNumber() == sequence_number;});
if (changeIter == m_changesForReader.end())
return false;
ChangeForReader_t newch(*changeIter);
auto hint = m_changesForReader.erase(changeIter);
newch.markFragmentsAsUnsent(frag_set);
// If it was UNSENT, we shouldn't switch back to REQUESTED to prevent stalling.
if (newch.getStatus() != UNSENT)
newch.setStatus(REQUESTED);
m_changesForReader.insert(hint, newch);
return true;
}
| 1 | 12,792 | Why are you setting the CacheChange as REQUESTED when it is not valid (it was erased from history)? | eProsima-Fast-DDS | cpp |
@@ -44,9 +44,12 @@ import {
isDataZeroForReporting,
} from '../util';
-const { __ } = wp.i18n;
-const { Component, Fragment } = wp.element;
-const { isEmpty } = lodash;
+/**
+ * WordPress dependencies
+ */
+import { __ } from '@wordpress/i18n';
+import { Component, Fragment } from '@wordpress/element';
+import { isEmpty } from 'lodash';
class AnalyticsDashboardWidgetTopLevel extends Component {
constructor( props ) { | 1 | /**
* AnalyticsDashboardWidgetTopLevel component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import DataBlock from 'GoogleComponents/data-block.js';
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
/**
* Internal dependencies
*/
import Sparkline from 'GoogleComponents/sparkline';
import CTA from 'GoogleComponents/notifications/cta';
import PreviewBlock from 'GoogleComponents/preview-block';
import {
getTimeInSeconds,
readableLargeNumber,
extractForSparkline,
getSiteKitAdminURL,
} from 'GoogleUtil';
import {
calculateOverviewData,
extractAnalyticsDashboardSparklineData,
getAnalyticsErrorMessageFromData,
siteAnalyticsReportDataDefaults,
overviewReportDataDefaults,
isDataZeroForReporting,
} from '../util';
const { __ } = wp.i18n;
const { Component, Fragment } = wp.element;
const { isEmpty } = lodash;
class AnalyticsDashboardWidgetTopLevel extends Component {
constructor( props ) {
super( props );
this.state = {
accounts: false,
goals: false,
};
}
// When additional data is returned, componentDidUpdate will fire.
componentDidUpdate() {
this.processCallbackData();
}
componentDidMount() {
this.processCallbackData();
}
/**
* Process callback data received from the API.
*/
processCallbackData() {
const {
data,
requestDataToState,
} = this.props;
if ( data && ! data.error && 'function' === typeof requestDataToState ) {
this.setState( requestDataToState );
}
}
render() {
const {
overview,
extractedAnalytics,
goals,
} = this.state;
const { permaLink } = googlesitekit;
const href = getSiteKitAdminURL( 'googlesitekit-module-analytics', {} );
const goalURL = 'https://support.google.com/analytics/answer/1032415?hl=en#create_or_edit_goals';
let totalUsers = '',
totalUsersChange = '',
goalCompletions = '',
goalCompletionsChange = '',
averageBounceRate = '',
averageBounceRateChange = '';
if ( overview ) {
totalUsers = overview.totalUsers;
totalUsersChange = overview.totalUsersChange;
goalCompletions = overview.goalCompletions;
goalCompletionsChange = overview.goalCompletionsChange;
averageBounceRate = overview.averageBounceRate;
averageBounceRateChange = overview.averageBounceRateChange;
}
return (
<Fragment>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-bottom
mdc-layout-grid__cell--span-2-phone
mdc-layout-grid__cell--span-2-tablet
mdc-layout-grid__cell--span-3-desktop
">
<DataBlock
className="overview-total-users"
title={ __( 'Unique Visitors from Search', 'google-site-kit' ) }
datapoint={ readableLargeNumber( totalUsers ) }
change={ totalUsersChange }
changeDataUnit="%"
source={ {
name: __( 'Analytics', 'google-site-kit' ),
link: href,
} }
sparkline={
extractedAnalytics &&
<Sparkline
data={ extractForSparkline( extractedAnalytics, 1 ) }
change={ totalUsersChange }
id="analytics-users-sparkline"
/>
}
/>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-bottom
mdc-layout-grid__cell--span-2-phone
mdc-layout-grid__cell--span-2-tablet
mdc-layout-grid__cell--span-3-desktop
">
{
/**
* The forth block shows goals for general view, and average time on page for detail view.
*/
}
{
permaLink && (
<DataBlock
className="overview-bounce-rate"
title={ __( 'Bounce Rate', 'google-site-kit' ) }
datapoint={ Number( averageBounceRate ).toFixed( 2 ) }
datapointUnit={ __( '%', 'google-site-kit' ) }
change={ averageBounceRateChange }
changeDataUnit="%"
reverseArrowDirection
source={ {
name: __( 'Analytics', 'google-site-kit' ),
link: href,
} }
sparkline={
extractedAnalytics &&
<Sparkline
data={ extractForSparkline( extractedAnalytics, 2 ) }
change={ averageBounceRateChange }
id="analytics-sessions-sparkline"
/>
}
/>
) }
{ ! permaLink && goals && isEmpty( goals.items ) && (
<CTA
title={ __( 'Use goals to measure success. ', 'google-site-kit' ) }
description={ __( 'Goals measure how well your site or app fulfills your target objectives.', 'google-site-kit' ) }
ctaLink={ goalURL }
ctaLabel={ __( 'Create a new goal', 'google-site-kit' ) }
/>
)
}
{ ! permaLink && goals && ! isEmpty( goals.items ) && (
<DataBlock
className="overview-goals-completed"
title={ __( 'Goals Completed', 'google-site-kit' ) }
datapoint={ readableLargeNumber( goalCompletions ) }
change={ goalCompletionsChange }
changeDataUnit="%"
source={ {
name: __( 'Analytics', 'google-site-kit' ),
link: href,
} }
sparkline={
extractedAnalytics &&
<Sparkline
data={ extractForSparkline( extractedAnalytics, 3 ) }
change={ goalCompletionsChange }
id="analytics-sessions-sparkline"
/>
}
/>
) }
{ ! permaLink && ! goals && (
<PreviewBlock width="100%" height="202px" />
) }
</div>
</Fragment>
);
}
}
const isDataZero = ( data, datapoint ) => {
if ( 'report' === datapoint ) {
return isDataZeroForReporting( data );
}
return false;
};
/*
Note: toState callbacks below accept the current data and state into an object which is passed to setState.
This is because withData changes the props passed to the child for each request.
*/
export default withData(
AnalyticsDashboardWidgetTopLevel,
[
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: {
...overviewReportDataDefaults,
url: googlesitekit.permaLink,
},
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: 'Dashboard',
toState( state, { data } ) {
if ( ! state.overview ) {
return {
overview: calculateOverviewData( data ),
};
}
},
},
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: {
...siteAnalyticsReportDataDefaults,
url: googlesitekit.permaLink,
},
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: 'Dashboard',
toState( state, { data } ) {
if ( ! state.extractedAnalytics ) {
return {
extractedAnalytics: extractAnalyticsDashboardSparklineData( data ),
};
}
},
},
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'goals',
data: {
url: googlesitekit.permaLink,
},
priority: 1,
maxAge: getTimeInSeconds( 'hour' ),
context: 'Dashboard',
toState( state, { data } ) {
if ( ! state.goals ) {
return {
goals: data,
};
}
},
},
],
<Fragment>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-bottom
mdc-layout-grid__cell--span-2-phone
mdc-layout-grid__cell--span-2-tablet
mdc-layout-grid__cell--span-3-desktop
">
<PreviewBlock width="100%" height="202px" />
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-bottom
mdc-layout-grid__cell--span-2-phone
mdc-layout-grid__cell--span-2-tablet
mdc-layout-grid__cell--span-3-desktop
">
<PreviewBlock width="100%" height="202px" />
</div>
</Fragment>,
{
inGrid: true,
},
isDataZero,
getAnalyticsErrorMessageFromData
);
| 1 | 24,750 | `lodash` shouldn't be grouped under WordPress dependencies | google-site-kit-wp | js |
@@ -69,7 +69,7 @@ class GenericDataFile
/**
* Used by Avro reflection to instantiate this class when reading manifest files.
*/
- public GenericDataFile(org.apache.avro.Schema avroSchema) {
+ GenericDataFile(org.apache.avro.Schema avroSchema) {
this.avroSchema = avroSchema;
Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ByteBuffers;
class GenericDataFile
implements DataFile, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
private static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of();
private static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) {
@Override
public PartitionData copy() {
return this; // this does not change
}
};
private int[] fromProjectionPos;
private Types.StructType partitionType;
private String filePath = null;
private FileFormat format = null;
private PartitionData partitionData = null;
private Long recordCount = null;
private long fileSizeInBytes = -1L;
private long blockSizeInBytes = -1L;
// optional fields
private Integer fileOrdinal = null; // boxed for nullability
private List<Integer> sortColumns = null;
private Map<Integer, Long> columnSizes = null;
private Map<Integer, Long> valueCounts = null;
private Map<Integer, Long> nullValueCounts = null;
private Map<Integer, ByteBuffer> lowerBounds = null;
private Map<Integer, ByteBuffer> upperBounds = null;
private ByteBuffer keyMetadata = null;
// cached schema
private transient org.apache.avro.Schema avroSchema = null;
/**
* Used by Avro reflection to instantiate this class when reading manifest files.
*/
public GenericDataFile(org.apache.avro.Schema avroSchema) {
this.avroSchema = avroSchema;
Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType();
// partition type may be null if the field was not projected
Type partType = schema.fieldType("partition");
if (partType != null) {
this.partitionType = partType.asNestedType().asStructType();
} else {
this.partitionType = EMPTY_STRUCT_TYPE;
}
List<Types.NestedField> fields = schema.fields();
List<Types.NestedField> allFields = DataFile.getType(partitionType).fields();
this.fromProjectionPos = new int[fields.size()];
for (int i = 0; i < fromProjectionPos.length; i += 1) {
boolean found = false;
for (int j = 0; j < allFields.size(); j += 1) {
if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
found = true;
fromProjectionPos[i] = j;
}
}
if (!found) {
throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
}
}
this.partitionData = new PartitionData(partitionType);
}
GenericDataFile(String filePath, FileFormat format, long recordCount,
long fileSizeInBytes, long blockSizeInBytes) {
this.filePath = filePath;
this.format = format;
this.partitionData = EMPTY_PARTITION_DATA;
this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
this.recordCount = recordCount;
this.fileSizeInBytes = fileSizeInBytes;
this.blockSizeInBytes = blockSizeInBytes;
this.fileOrdinal = null;
this.sortColumns = null;
this.columnSizes = null;
this.valueCounts = null;
this.nullValueCounts = null;
this.lowerBounds = null;
this.upperBounds = null;
this.fromProjectionPos = null;
}
GenericDataFile(String filePath, FileFormat format, PartitionData partition,
long recordCount, long fileSizeInBytes, long blockSizeInBytes) {
this.filePath = filePath;
this.format = format;
this.partitionData = partition;
this.partitionType = partition.getPartitionType();
this.recordCount = recordCount;
this.fileSizeInBytes = fileSizeInBytes;
this.blockSizeInBytes = blockSizeInBytes;
this.fileOrdinal = null;
this.sortColumns = null;
this.columnSizes = null;
this.valueCounts = null;
this.nullValueCounts = null;
this.lowerBounds = null;
this.upperBounds = null;
this.fromProjectionPos = null;
}
GenericDataFile(String filePath, FileFormat format, PartitionData partition,
long fileSizeInBytes, long blockSizeInBytes, Metrics metrics) {
this.filePath = filePath;
this.format = format;
// this constructor is used by DataFiles.Builder, which passes null for unpartitioned data
if (partition == null) {
this.partitionData = EMPTY_PARTITION_DATA;
this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
} else {
this.partitionData = partition;
this.partitionType = partition.getPartitionType();
}
// this will throw NPE if metrics.recordCount is null
this.recordCount = metrics.recordCount();
this.fileSizeInBytes = fileSizeInBytes;
this.blockSizeInBytes = blockSizeInBytes;
this.fileOrdinal = null;
this.sortColumns = null;
this.columnSizes = metrics.columnSizes();
this.valueCounts = metrics.valueCounts();
this.nullValueCounts = metrics.nullValueCounts();
this.lowerBounds = SerializableByteBufferMap.wrap(metrics.lowerBounds());
this.upperBounds = SerializableByteBufferMap.wrap(metrics.upperBounds());
this.fromProjectionPos = null;
}
GenericDataFile(String filePath, FileFormat format, PartitionData partition,
long fileSizeInBytes, long blockSizeInBytes, Metrics metrics,
ByteBuffer keyMetadata) {
this(filePath, format, partition, fileSizeInBytes, blockSizeInBytes, metrics);
this.keyMetadata = keyMetadata;
}
/**
* Copy constructor.
*
* @param toCopy a generic data file to copy.
*/
private GenericDataFile(GenericDataFile toCopy) {
this.filePath = toCopy.filePath;
this.format = toCopy.format;
this.partitionData = toCopy.partitionData.copy();
this.partitionType = toCopy.partitionType;
this.recordCount = toCopy.recordCount;
this.fileSizeInBytes = toCopy.fileSizeInBytes;
this.blockSizeInBytes = toCopy.blockSizeInBytes;
this.fileOrdinal = toCopy.fileOrdinal;
this.sortColumns = copy(toCopy.sortColumns);
// TODO: support lazy conversion to/from map
this.columnSizes = copy(toCopy.columnSizes);
this.valueCounts = copy(toCopy.valueCounts);
this.nullValueCounts = copy(toCopy.nullValueCounts);
this.lowerBounds = SerializableByteBufferMap.wrap(copy(toCopy.lowerBounds));
this.upperBounds = SerializableByteBufferMap.wrap(copy(toCopy.upperBounds));
this.fromProjectionPos = toCopy.fromProjectionPos;
this.keyMetadata = toCopy.keyMetadata == null ? null : ByteBuffers.copy(toCopy.keyMetadata);
}
/**
* Constructor for Java serialization.
*/
GenericDataFile() {
}
@Override
public CharSequence path() {
return filePath;
}
@Override
public FileFormat format() {
return format;
}
@Override
public StructLike partition() {
return partitionData;
}
@Override
public long recordCount() {
return recordCount;
}
@Override
public long fileSizeInBytes() {
return fileSizeInBytes;
}
@Override
public long blockSizeInBytes() {
return blockSizeInBytes;
}
@Override
public Integer fileOrdinal() {
return fileOrdinal;
}
@Override
public List<Integer> sortColumns() {
return sortColumns;
}
@Override
public Map<Integer, Long> columnSizes() {
return columnSizes;
}
@Override
public Map<Integer, Long> valueCounts() {
return valueCounts;
}
@Override
public Map<Integer, Long> nullValueCounts() {
return nullValueCounts;
}
@Override
public Map<Integer, ByteBuffer> lowerBounds() {
return lowerBounds;
}
@Override
public Map<Integer, ByteBuffer> upperBounds() {
return upperBounds;
}
@Override
public ByteBuffer keyMetadata() {
return keyMetadata;
}
@Override
public org.apache.avro.Schema getSchema() {
if (avroSchema == null) {
this.avroSchema = getAvroSchema(partitionType);
}
return avroSchema;
}
@Override
@SuppressWarnings("unchecked")
public void put(int i, Object v) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
// always coerce to String for Serializable
this.filePath = v.toString();
return;
case 1:
this.format = FileFormat.valueOf(v.toString());
return;
case 2:
this.partitionData = (PartitionData) v;
return;
case 3:
this.recordCount = (Long) v;
return;
case 4:
this.fileSizeInBytes = (Long) v;
return;
case 5:
this.blockSizeInBytes = (Long) v;
return;
case 6:
this.fileOrdinal = (Integer) v;
return;
case 7:
this.sortColumns = (List<Integer>) v;
return;
case 8:
this.columnSizes = (Map<Integer, Long>) v;
return;
case 9:
this.valueCounts = (Map<Integer, Long>) v;
return;
case 10:
this.nullValueCounts = (Map<Integer, Long>) v;
return;
case 11:
this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
return;
case 12:
this.upperBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
return;
case 13:
this.keyMetadata = (ByteBuffer) v;
default:
// ignore the object, it must be from a newer version of the format
}
}
@Override
public Object get(int i) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
return filePath;
case 1:
return format != null ? format.toString() : null;
case 2:
return partitionData;
case 3:
return recordCount;
case 4:
return fileSizeInBytes;
case 5:
return blockSizeInBytes;
case 6:
return fileOrdinal;
case 7:
return sortColumns;
case 8:
return columnSizes;
case 9:
return valueCounts;
case 10:
return nullValueCounts;
case 11:
return lowerBounds;
case 12:
return upperBounds;
case 13:
return keyMetadata;
default:
throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
}
}
private static org.apache.avro.Schema getAvroSchema(Types.StructType partitionType) {
Types.StructType type = DataFile.getType(partitionType);
return AvroSchemaUtil.convert(type, ImmutableMap.of(
type, GenericDataFile.class.getName(),
partitionType, PartitionData.class.getName()));
}
@Override
public int size() {
return 14;
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(get(pos));
}
@Override
public <T> void set(int pos, T value) {
put(pos, value);
}
@Override
public DataFile copy() {
return new GenericDataFile(this);
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("file_path", filePath)
.add("file_format", format)
.add("partition", partitionData)
.add("record_count", recordCount)
.add("file_size_in_bytes", fileSizeInBytes)
.add("block_size_in_bytes", blockSizeInBytes)
.add("column_sizes", columnSizes)
.add("value_counts", valueCounts)
.add("null_value_counts", nullValueCounts)
.add("lower_bounds", lowerBounds)
.add("upper_bounds", upperBounds)
.add("key_metadata", keyMetadata == null ? "null" : "(redacted)")
.toString();
}
private static <K, V> Map<K, V> copy(Map<K, V> map) {
if (map != null) {
return ImmutableMap.copyOf(map);
}
return null;
}
private static <E> List<E> copy(List<E> list) {
if (list != null) {
return ImmutableList.copyOf(list);
}
return null;
}
}
| 1 | 13,201 | This needs to be public so that Avro can call it. | apache-iceberg | java |
@@ -18,18 +18,7 @@
/**
* External dependencies
*/
-import data, { TYPE_CORE } from 'GoogleComponents/data';
-import SvgIcon from 'GoogleUtil/svg-icon';
-
-export * from './storage';
-
-const { apiFetch } = wp;
-const {
- addFilter,
- applyFilters,
-} = wp.hooks;
-
-const {
+import {
map,
isNull,
isUndefined, | 1 | /**
* Utility functions.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import data, { TYPE_CORE } from 'GoogleComponents/data';
import SvgIcon from 'GoogleUtil/svg-icon';
export * from './storage';
const { apiFetch } = wp;
const {
addFilter,
applyFilters,
} = wp.hooks;
const {
map,
isNull,
isUndefined,
unescape,
deburr,
toLower,
trim,
} = lodash;
const {
_n,
sprintf,
} = wp.i18n;
const { addQueryArgs, getQueryString } = wp.url;
const { __ } = wp.i18n;
/**
* Remove a parameter from a URL string.
*
* Fallback for when URL is unable to handle this.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
const removeURLFallBack = ( url, parameter ) => {
const urlparts = url.split( '?' );
if ( 2 <= urlparts.length ) {
const prefix = encodeURIComponent( parameter ) + '=';
const pars = urlparts[ 1 ].split( /[&;]/g );
//reverse iteration as may be destructive
const newPars = pars.filter( ( param ) => {
return -1 === param.lastIndexOf( prefix, 0 );
} );
url = urlparts[ 0 ] + '/' + ( 0 < newPars.length ? '?' + newPars.join( '&' ) : '' );
return url;
}
return url;
};
/**
* Remove a parameter from a URL string.
*
* Leverages the URL object internally.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
export const removeURLParameter = ( url, parameter ) => {
const parsedURL = new URL( url );
// If the URL implementation doesn't support ! parsedURL.searchParams, use the fallback handler.
if ( ! parsedURL.searchParams || ! parsedURL.searchParams.delete ) {
return removeURLFallBack( url, parameter );
}
parsedURL.searchParams.delete( parameter );
return parsedURL.href;
};
/**
* Format a large number for shortened display.
*
* @param {number} number The large number to format.
* @param {string|boolean} currencyCode Optional currency code to format as amount.
*
* @return {string} The formatted number.
*/
export const readableLargeNumber = ( number, currencyCode = false ) => {
let readableNumber;
// Handle passed data undefined.
if ( isUndefined( number ) ) {
readableNumber = 0;
} else if ( 1000000 < number ) {
number = number / 1000000;
readableNumber = number.toFixed( 1 ) + 'M';
} else if ( 1000 < number ) {
number = number / 1000;
if ( 99 < number ) {
readableNumber = Math.round( number ) + 'K';
} else {
readableNumber = number.toFixed( 1 ) + 'K';
}
} else {
readableNumber = number;
}
// Handle errors after calculations.
if ( isNull( number ) || isUndefined( number ) || isNaN( number ) ) {
readableNumber = '';
number = 0;
}
if ( 0 === number ) {
readableNumber = '0.00';
return currencyCode ?
new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).format( number ) :
number;
}
// Format as amount if currencyCode is passed.
if ( false !== currencyCode && '' !== readableNumber ) {
const formatedParts = new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).formatToParts( number );
const decimal = formatedParts.find( ( part ) => 'decimal' === part.type );
if ( ! isUndefined( decimal ) && ! isUndefined( decimal.value ) && 1000 > number ) {
readableNumber = Number.isInteger( number ) ? number : number.replace( '.', decimal.value );
}
const currencyFound = formatedParts.find( ( part ) => 'currency' === part.type );
const currency = currencyFound ? currencyFound.value : '';
return `${ currency }${ readableNumber }`;
}
return readableNumber;
};
/**
* Internationalization Number Format.
*
* @param {number} number The number to format.
* @param {string} locale Optional, locale to format as amount, default to Browser's locale.
*
* @return {string} The formatted number.
*/
export const numberFormat = ( number, locale = '' ) => {
if ( ! locale ) {
locale = navigator.language;
}
// This line to make sure we use lower case local format, ex: en-us.
locale = locale.replace( '_', '-' ).toLocaleLowerCase();
return new Intl.NumberFormat( locale ).format( number );
};
/**
* Transform a period string into a number of seconds.
*
* @param {string} period The period to transform.
*
* @return {number} The number of seconds
*/
export const getTimeInSeconds = ( period ) => {
const minute = 60;
const hour = minute * 60;
const day = hour * 24;
const week = day * 7;
const month = day * 30;
const year = day * 365;
switch ( period ) {
case 'minute':
return minute;
case 'hour':
return hour;
case 'day':
return day;
case 'week':
return week;
case 'month':
return month;
case 'year':
return year;
}
};
/**
* Converts seconds to a display ready string indicating
* the number of hours, minutes and seconds that have elapsed.
*
* For example, passing 65 returns '1m 5s'.
*
* @param {number} seconds The number of seconds.
*/
export const prepareSecondsForDisplay = ( seconds ) => {
seconds = parseInt( seconds, 10 );
if ( isNaN( seconds ) || 0 === seconds ) {
return '0.0s';
}
const results = {};
results.hours = Math.floor( seconds / 60 / 60 );
results.minutes = Math.floor( ( seconds / 60 ) % 60 );
results.seconds = Math.floor( seconds % 60 );
const returnString =
( results.hours ? results.hours + 'h ' : '' ) +
( results.minutes ? results.minutes + 'm ' : '' ) +
( results.seconds ? results.seconds + 's ' : '' );
return returnString.trim();
};
/**
* Retrieve number of days between 2 dates.
*
* @param {Object} dateStart
* @param {Object} dateEnd
*
* @return {number} The number of days.
*/
export const getDaysBetweenDates = ( dateStart, dateEnd ) => {
const dayMs = 1000 * getTimeInSeconds( 'day' );
const dateStartMs = dateStart.getTime();
const dateEndMs = dateEnd.getTime();
return Math.round( Math.abs( dateStartMs - dateEndMs ) / dayMs );
};
/**
* Calculate the percent change between two values.
*
* @param {number} previous The previous value.
* @param {number} current The current value.
*
* @return {number|string} The percent change.
*/
export const changeToPercent = ( previous, current ) => {
// Prevent divide by zero errors.
if ( '0' === previous || 0 === previous || isNaN( previous ) ) {
return '';
}
const change = ( ( current - previous ) / previous * 100 ).toFixed( 1 );
// Avoid NaN at all costs.
if ( isNaN( change ) || 'Infinity' === change ) {
return '';
}
return change;
};
/**
* Fallback helper to get a query parameter from the current URL.
*
* Used when URL.searchParams is unavailable.
*
* @param {string} name Query param to search for.
* @return {string}
*/
const fallbackGetQueryParamater = ( name ) => {
const queries = location.search.substr( 1 ).split( '&' );
const queryDict = {};
for ( let i = 0; i < queries.length; i++ ) {
queryDict[ queries[ i ].split( '=' )[ 0 ] ] = decodeURIComponent( queries[ i ].split( '=' )[ 1 ] );
}
// If the name is specified, return that specific get parameter
if ( name ) {
return queryDict.hasOwnProperty( name ) ? decodeURIComponent( queryDict[ name ].replace( /\+/g, ' ' ) ) : '';
}
return queryDict;
};
/**
* Get query parameter from the current URL.
*
* @param {string} name Query param to search for.
* @return {string}
*/
export const getQueryParameter = ( name ) => {
const url = new URL( location.href );
if ( name ) {
if ( ! url.searchParams || ! url.searchParams.get ) {
return fallbackGetQueryParamater( name );
}
return url.searchParams.get( name );
}
const query = {};
for ( const [ key, value ] of url.searchParams.entries() ) {
query[ key ] = value;
}
return query;
};
/**
* Extract a single column of data for a sparkline from a dataset prepared for google charts.
*
* @param {Array} rowData An array of google charts row data.
* @param {number} column The column to extract for the sparkline.
*/
export const extractForSparkline = ( rowData, column ) => {
return map( rowData, ( row, i ) => {
return [
row[ 0 ], // row[0] always contains the x axis value (typically date).
row[ column ] || ( 0 === i ? '' : 0 ), // the data for the sparkline.
];
} );
};
export const refreshAuthentication = async () => {
try {
const response = await data.get( TYPE_CORE, 'user', 'authentication' );
const requiredAndGrantedScopes = response.grantedScopes.filter( ( scope ) => {
return -1 !== response.requiredScopes.indexOf( scope );
} );
// We should really be using state management. This is terrible.
window.googlesitekit.setup = window.googlesitekit.setup || {};
window.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
window.googlesitekit.setup.requiredScopes = response.requiredScopes;
window.googlesitekit.setup.grantedScopes = response.grantedScopes;
window.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
} catch ( e ) { // eslint-disable-line no-empty
}
};
/**
* Get the URL needed to initiate a reAuth flow.
*
* @param {string} slug The module slug. If included redirect URL will include page: page={ `googlesitekit-${slug}`}.
* @param {boolean} status The module activation status.
*/
export const getReAuthURL = ( slug, status ) => {
const {
connectURL,
adminRoot,
} = googlesitekit.admin;
const { needReauthenticate } = window.googlesitekit.setup;
const { screenID } = googlesitekit.modules[ slug ];
// Special case handling for PageSpeed Insights.
// TODO: Refactor this out.
const pageSpeedQueryArgs = 'pagespeed-insights' === slug ? {
notification: 'authentication_success',
reAuth: undefined,
} : {};
let redirect = addQueryArgs(
adminRoot,
{
// If the module has a submenu page, and is being activated, redirect back to the module page.
page: ( slug && status && screenID ) ? screenID : 'googlesitekit-dashboard',
slug,
reAuth: status,
...pageSpeedQueryArgs,
}
);
if ( ! needReauthenticate ) {
return redirect;
}
// Encodes the query string to ensure the redirect url is not messing up with the main url.
const queryString = encodeURIComponent( getQueryString( redirect ) );
// Rebuild the redirect url.
redirect = adminRoot + '?' + queryString;
return addQueryArgs(
connectURL, {
redirect,
status,
}
);
};
/**
* Replace a filtered component with the passed component and merge their props.
*
* Components wrapped in the 'withFilters' higher order component have a filter applied to them (wp.hooks.applyFilters).
* This helper is used to replace (or "Fill") a filtered component with a passed component. To use, pass as the third
* argument to an addFilter call, eg:
*
* addFilter( `googlesitekit.ModuleSettingsDetails-${slug}`,
* 'googlesitekit.AdSenseModuleSettingsDetails',
* fillFilterWithComponent( AdSenseSettings, {
* onSettingsPage: true,
* } ) );
*
* @param {Component} NewComponent The component to render in place of the filtered component.
* @param {Object} newProps The props to pass down to the new component.
*/
export const fillFilterWithComponent = ( NewComponent, newProps ) => {
return ( OriginalComponent ) => {
return function InnerComponent( props ) {
return (
<NewComponent { ...props } { ...newProps } OriginalComponent={ OriginalComponent } />
);
};
};
};
/**
* Get Site Kit Admin URL Helper
*
* @param { string } page The page slug. Optional. Default is 'googlesitekit-dashboard'.
* @param { Object } args Optional. Object of argiments to add to the URL.
*
* @return string
*/
export const getSiteKitAdminURL = ( page, args ) => {
const { adminRoot } = googlesitekit.admin;
if ( ! page ) {
page = 'googlesitekit-dashboard';
}
args = { page, ...args };
return addQueryArgs( adminRoot, args );
};
/**
* Verifies if the Front End site has been loaded in the iframe to check for tag presence.
*
* @return mixed Returns the iframe if it's loaded, false if not loaded.
*/
export const isFrontendIframeLoaded = () => {
const iframe = document.getElementById( 'sitekit_fe_load_check' );
if ( iframe ) {
return iframe;
}
return false;
};
/**
* Verifies whether JSON is valid.
*
* @param { string } stringToValidate The string to validate.
*
* @return boolean Whether JSON is valid.
*/
export const validateJSON = ( stringToValidate ) => {
try {
return ( JSON.parse( stringToValidate ) && !! stringToValidate );
} catch ( e ) {
return false;
}
};
/**
* Verifies Optimize ID
*
* @param { string } stringToValidate The string to validate.
*
* @return boolean
*/
export const validateOptimizeID = ( stringToValidate ) => {
return ( stringToValidate.match( /^GTM-[a-zA-Z\d]{7}$/ ) );
};
/**
* Appends a notification count icon to the Site Kit dashboard menu/admin bar when
* user is outside the Site Kit app.
*
* Retrieves the number from local storage previously stored by NotificationCounter
* used in googlesitekit-admin.js
*/
export const appendNotificationsCount = ( count = 0 ) => {
let menuSelector = null;
let adminbarSelector = null;
const counterMenu = document.querySelector( '#toplevel_page_googlesitekit-dashboard #googlesitekit-notifications-counter' );
const counterAdminbar = document.querySelector( '#wp-admin-bar-google-site-kit #googlesitekit-notifications-counter' );
if ( counterMenu && counterAdminbar ) {
return false;
}
menuSelector = document.querySelector( '#toplevel_page_googlesitekit-dashboard .wp-menu-name' );
adminbarSelector = document.querySelector( '#wp-admin-bar-google-site-kit .ab-item' );
if ( null === menuSelector && null === adminbarSelector ) {
return false;
}
const wrapper = document.createElement( 'span' );
wrapper.setAttribute( 'class', `googlesitekit-notifications-counter update-plugins count-${ count }` );
wrapper.setAttribute( 'id', 'googlesitekit-notifications-counter' );
const pluginCount = document.createElement( 'span' );
pluginCount.setAttribute( 'class', 'plugin-count' );
pluginCount.setAttribute( 'aria-hidden', 'true' );
pluginCount.textContent = count;
const screenReader = document.createElement( 'span' );
screenReader.setAttribute( 'class', 'screen-reader-text' );
screenReader.textContent = sprintf(
_n(
'%d notification',
'%d notifications',
count,
'google-site-kit'
),
count
);
wrapper.appendChild( pluginCount );
wrapper.appendChild( screenReader );
if ( menuSelector && null === counterMenu ) {
menuSelector.appendChild( wrapper );
}
if ( adminbarSelector && null === counterAdminbar ) {
adminbarSelector.appendChild( wrapper );
}
return wrapper;
};
/**
* Send an analytics tracking event.
*
* @param {string} eventCategory The event category. Required.
* @param {string} eventName The event category. Required.
* @param {string} eventLabel The event category. Optional.
* @param {string} eventValue The event category. Optional.
*
*/
export const sendAnalyticsTrackingEvent = ( eventCategory, eventName, eventLabel = '', eventValue = '' ) => {
if ( 'undefined' === typeof gtag ) {
return;
}
const {
siteURL,
siteUserID,
} = googlesitekit.admin;
const { isFirstAdmin } = googlesitekit.setup;
const { trimEnd } = lodash;
if ( googlesitekit.admin.trackingOptin ) {
return gtag( 'event', eventName, {
send_to: googlesitekit.admin.trackingID, /*eslint camelcase: 0*/
event_category: eventCategory, /*eslint camelcase: 0*/
event_label: eventLabel, /*eslint camelcase: 0*/
event_value: eventValue, /*eslint camelcase: 0*/
dimension1: trimEnd( siteURL, '/' ), // Domain.
dimension2: isFirstAdmin ? 'true' : 'false', // First Admin?
dimension3: siteUserID, // Identifier.
} );
}
};
export const findTagInHtmlContent = ( html, module ) => {
let existingTag = false;
if ( ! html ) {
return false;
}
existingTag = extractTag( html, module );
return existingTag;
};
/**
* Looks for existing tag requesting front end html, if no existing tag was found on server side
* while requesting list of accounts.
*
* @param {string} module Module slug.
*
* @param {string|null} The tag id if found, otherwise null.
*/
export const getExistingTag = async ( module ) => {
const { homeURL, ampMode } = googlesitekit.admin;
const tagFetchQueryArgs = {
// Indicates a tag checking request. This lets Site Kit know not to output its own tags.
tagverify: 1,
// Add a timestamp for cache-busting.
timestamp: Date.now(),
};
// Always check the homepage regardless of AMP mode.
let tagFound = await scrapeTag( addQueryArgs( homeURL, tagFetchQueryArgs ), module );
if ( ! tagFound && 'secondary' === ampMode ) {
tagFound = await apiFetch( { path: '/wp/v2/posts?per_page=1' } ).then(
// Scrape the first post in AMP mode, if there is one.
( posts ) => posts.slice( 0, 1 ).map( async ( post ) => {
return await scrapeTag( addQueryArgs( post.link, { ...tagFetchQueryArgs, amp: 1 } ), module );
} ).pop()
);
}
return Promise.resolve( tagFound || null );
};
/**
* Scrapes a module tag from the given URL.
*
* @param {string} url URL request and parse tag from.
* @param {string} module The module to parse tag for.
*
* @return {string|null} The tag id if found, otherwise null.
*/
export const scrapeTag = async ( url, module ) => {
try {
const html = await fetch( url ).then( ( res ) => res.text() );
return extractTag( html, module ) || null;
} catch ( error ) {
return null;
}
};
/**
* Extracts the tag related to a module from the given string by detecting Analytics and AdSense tag variations.
*
* @param {string} string The string from where to find the tag.
* @param {string} tag The tag to search for, one of 'adsense' or 'analytics'
*
* @return string|bool The tag id if found, otherwise false.
*/
export const extractTag = ( string, tag ) => {
let result = false;
let reg = null;
switch ( tag ) {
case 'analytics':
// Detect gtag script calls.
reg = new RegExp( /<script [^>]*src=['|"]https:\/\/www.googletagmanager.com\/gtag\/js\?id=(.*?)['|"][^>]*><\/script>/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
// Detect common analytics code usage.
if ( ! result ) {
reg = new RegExp( /__gaTracker\( ?['|"]create['|"], ?['|"](.*?)['|"], ?['|"]auto['|"] ?\)/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
// Detect ga create calls.
if ( ! result ) {
reg = new RegExp( /ga\( ?['|"]create['|"], ?['|"](.*?)['|"], ?['|"]auto['|"] ?\)/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
if ( ! result ) {
reg = new RegExp( /_gaq.push\( ?\[ ?['|"]_setAccount['|"], ?['|"](.*?)['|"] ?] ?\)/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
// Detect amp-analytics gtag.
if ( ! result ) {
reg = new RegExp( /<amp-analytics [^>]*type="gtag"[^>]*>[^<]*<script type="application\/json">[^<]*"gtag_id":\s*"([^"]+)"/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
// Detect amp-analytics googleanalytics.
if ( ! result ) {
reg = new RegExp( /<amp-analytics [^>]*type="googleanalytics"[^>]*>[^<]*<script type="application\/json">[^<]*"account":\s*"([^"]+)"/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
break;
case 'adsense':
// Detect google_ad_client.
reg = new RegExp( /google_ad_client: ?["|'](.*?)["|']/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
// Detect amp-auto-ads tag.
if ( ! result ) {
reg = new RegExp( /<amp-auto-ads [^>]*data-ad-client="([^"]+)"/gm );
result = reg.exec( string );
result = result ? result[ 1 ] : false;
}
break;
}
return result;
};
/**
* Activate or Deactivate a Module.
*
* @param {Object} restApiClient Rest API client from data module, this needed so we don't need to import data module in helper.
* @param {string} moduleSlug Module slug to activate or deactivate.
* @param {boolean} status True if module should be activated, false if it should be deactivated.
* @return {Promise}
*/
export const activateOrDeactivateModule = ( restApiClient, moduleSlug, status ) => {
return restApiClient.setModuleActive( moduleSlug, status ).then( ( responseData ) => {
// We should really be using state management. This is terrible.
if ( window.googlesitekit.modules && window.googlesitekit.modules[ moduleSlug ] ) {
window.googlesitekit.modules[ moduleSlug ].active = responseData.active;
}
sendAnalyticsTrackingEvent(
`${ moduleSlug }_setup`,
! responseData.active ? 'module_deactivate' : 'module_activate',
moduleSlug,
);
return new Promise( ( resolve ) => {
resolve( responseData );
} );
} );
};
/**
* Helper to toggle confirm changes button disable/enable
* depending on the module changed settings.
*
* @param {string} moduleSlug The module slug being edited.
* @param {Object} settingsMapping The mapping between form settings names and saved settings.
* @param {Object} settingsState The changed settings component state to compare with.
* @param {Object} skipDOM Skip DOm checks/modifications, used for testing.
*/
export const toggleConfirmModuleSettings = ( moduleSlug, settingsMapping, settingsState, skipDOM = false ) => {
const { settings, setupComplete } = googlesitekit.modules[ moduleSlug ];
const confirm = skipDOM || document.getElementById( `confirm-changes-${ moduleSlug }` );
if ( ! setupComplete || ! confirm ) {
return;
}
const currentSettings = [];
Object.keys( settingsState ).forEach( ( key ) => {
if ( -1 < Object.keys( settingsMapping ).indexOf( key ) ) {
currentSettings[ settingsMapping[ key ] ] = settingsState[ key ];
}
} );
const savedSettings = [];
Object.keys( settings ).forEach( ( key ) => {
if ( -1 < Object.values( settingsMapping ).indexOf( key ) ) {
savedSettings[ key ] = settings[ key ];
}
} );
const changed = Object.keys( savedSettings ).filter( ( key ) => {
if ( savedSettings[ key ] !== currentSettings[ key ] ) {
return true;
}
return false;
} );
if ( 0 < changed.length ) {
if ( skipDOM ) {
return true;
}
confirm.removeAttribute( 'disabled' );
} else {
if ( skipDOM ) {
return false;
}
confirm.setAttribute( 'disabled', 'disabled' );
}
};
/**
* Trigger error notification on top of the page.
*
* @param {Component} ErrorComponent The error component to render in place.
* @param {Object} props The props to pass down to the error component. Optional.
*/
export const showErrorNotification = ( ErrorComponent, props = {} ) => {
addFilter( 'googlesitekit.ErrorNotification',
'googlesitekit.ErrorNotification',
fillFilterWithComponent( ErrorComponent, props ), 1 );
};
/**
* HTML text into HTML entity.
*
* _.unescape doesn't seem to decode some entities for admin bar titles.
* adding combination in this helper as a workaround.
*
* @param {string} str The string to decode.
*
* @return {string}
*/
export const decodeHtmlEntity = ( str ) => {
const decoded = str.replace( /&#(\d+);/g, function( match, dec ) {
return String.fromCharCode( dec );
} ).replace( /(\\)/g, '' );
return unescape( decoded );
};
/**
* Performs some basic cleanup of a string for use as a post slug
*
* Emnulates santize_title() from WordPress core.
*
* @return {string} Processed string
*/
export function stringToSlug( string ) {
return toLower( deburr( trim( string.replace( /[\s./_]+/g, '-' ), '-' ) ) );
}
/**
* Gets the current dateRange string.
*
* @return {string} the date range string.
*/
export function getCurrentDateRange() {
/**
* Filter the date range used for queries.
*
* @param String The selected date range. Default 'Last 28 days'.
*/
return applyFilters( 'googlesitekit.dateRange', __( 'Last 28 days', 'google-site-kit' ) );
}
/**
* Return the currently selected date range as a string that fits in the sentence:
* "Data for the last [date range]", eg "Date for the last 28 days".
*/
export function getDateRangeFrom() {
return getCurrentDateRange().replace( 'Last ', '' );
}
/**
* Gets the current dateRange slug.
*
* @return {string} the date range slug.
*/
export function getCurrentDateRangeSlug() {
return stringToSlug( getCurrentDateRange() );
}
/**
* Get the icon for a module.
*
* @param {string} module The module slug.
* @param {boolean} blockedByParentModule Whether the module is blocked by a parent module.
* @param {string} width The icon width.
* @param {string} height The icon height.
* @param {string} class Class string to use for icon.
*/
export function moduleIcon( module, blockedByParentModule, width = '33', height = '33', useClass = '' ) {
if ( ! googlesitekit ) {
return;
}
/* Set module icons. Page Speed Insights is a special case because only a .png is available. */
let iconComponent = <SvgIcon id={ module } width={ width } height={ height } className={ useClass } />;
if ( blockedByParentModule ) {
iconComponent = <SvgIcon id={ `${ module }-disabled` } width={ width } height={ height } className={ useClass } />;
} else if ( 'pagespeed-insights' === module ) {
iconComponent = <img src={ googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ width } alt="" className={ useClass } />;
}
return iconComponent;
}
/**
* Clears session storage and local storage.
*
* Both of these should be cleared to make sure no Site Kit data is left in the
* browser's cache regardless of which storage implementation is used.
*/
export function clearAppLocalStorage() {
if ( window.localStorage ) {
window.localStorage.clear();
}
if ( window.sessionStorage ) {
window.sessionStorage.clear();
}
}
/**
* Sorts an object by its keys.
*
* The returned value will be a sorted copy of the input object.
* Any inner objects will also be sorted recursively.
*
* @param {Object} obj The data object to sort.
* @return {Object} The sorted data object.
*/
export function sortObjectProperties( obj ) {
const orderedData = {};
Object.keys( obj ).sort().forEach( ( key ) => {
let val = obj[ key ];
if ( val && 'object' === typeof val && ! Array.isArray( val ) ) {
val = sortObjectProperties( val );
}
orderedData[ key ] = val;
} );
return orderedData;
}
| 1 | 24,756 | `Google*` shouldn't be under External dependencies - seems like ESlint is not properly recognizing that these are aliases to internal dependencies. | google-site-kit-wp | js |
@@ -245,6 +245,13 @@ class DataFrame(_Frame):
else:
super(DataFrame, self).__init__(_InternalFrame(
data, data_columns=index.data_columns, index_map=index.index_map))
+ elif isinstance(data, ks.Series):
+ assert index is None
+ assert columns is None
+ assert dtype is None
+ assert not copy
+ data = data.to_dataframe()
+ super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None | 1 | #
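For context, a minimal sketch of the constructor path this hunk adds (the column name `'x'` and variable names are illustrative, and a running SparkSession is assumed):

import databricks.koalas as ks

kdf = ks.DataFrame({'x': [1, 2, 3]})
kser = kdf['x']            # a koalas Series
kdf2 = ks.DataFrame(kser)  # new branch: delegates to kser.to_dataframe()
# kdf2 has a single data column named 'x'

Delegating to to_dataframe() keeps the Series-to-DataFrame conversion logic in one place instead of duplicating it in the constructor.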
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similarly to pandas DataFrame.
"""
import re
import warnings
from functools import partial, reduce
from typing import Any, Optional, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column, DataFrame as SDataFrame
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructType)
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.internal import _InternalFrame
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.typedef import infer_pd_series_spark_type
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in __repr__ and _repr_html_ in DataFrame.
# Both patterns seek the footer string that pandas appends to its string representation.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
    Any single scalar value.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply by constant.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Divide by constant.
>>> df / 1
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df.div(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
"""
class DataFrame(_Frame):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
    :ivar _sdf: Spark DataFrame instance
:type _sdf: SDataFrame
:ivar _metadata: Metadata related to column names and index information.
:type _metadata: Metadata
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, other arguments should not be used.
If `data` is a Spark DataFrame, all other arguments except `index` should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert columns is None
assert dtype is None
assert not copy
if index is None:
super(DataFrame, self).__init__(_InternalFrame(data))
else:
super(DataFrame, self).__init__(_InternalFrame(
data, data_columns=index.data_columns, index_map=index.index_map))
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
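# Constructor dispatch above, in sketch form (using the same names as the code):
#
#     _InternalFrame   -> used as-is
#     spark.DataFrame  -> wrapped in _InternalFrame (with the given Metadata when `index` is passed)
#     pd.DataFrame     -> _InternalFrame.from_pandas(data)
#     anything else    -> first normalized via pd.DataFrame(data, index, columns, dtype, copy)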
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
@_sdf.setter
def _sdf(self, sdf: spark.DataFrame) -> None:
self._internal = self._internal.copy(sdf=sdf)
@property
def _metadata(self) -> Metadata:
return Metadata(data_columns=self._internal.data_columns,
index_map=self._internal.index_map)
@_metadata.setter
def _metadata(self, metadata: Metadata) -> None:
self._internal = self._internal.copy(data_columns=metadata.data_columns,
index_map=metadata.index_map)
def _reduce_for_stat_function(self, sfun, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equals the
number of columns.
Parameters
----------
sfun : either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
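# A minimal sketch of the two sfun shapes this helper accepts; the function names
# below are illustrative only, and F is assumed to be pyspark.sql.functions:
#
#     def sfun_one_arg(col):           # 1-arg form: receives only the Column
#         return F.sum(col)
#
#     def sfun_two_args(col, dtype):   # 2-arg form: receives the Column and its Spark DataType
#         return F.avg(col)
#
#     kdf._reduce_for_stat_function(sfun_one_arg, numeric_only=True)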
# Arithmetic Operators
def _map_series_op(self, op, other):
if isinstance(other, DataFrame) or is_sequence(other):
raise ValueError(
"%s with another DataFrame or a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
applied = []
for column in self._metadata.data_columns:
applied.append(getattr(self[column], op)(other))
sdf = self._sdf.select(
self._metadata.index_columns + [c._scol for c in applied])
metadata = self._metadata.copy(data_columns=[c.name for c in applied])
return DataFrame(sdf, metadata)
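# Rough sketch of the scalar broadcast performed above for a frame with columns
# 'a' and 'b' (illustrative names): `kdf + 1` builds the list
# [kdf['a'].add(1), kdf['b'].add(1)], selects those expressions next to the index
# columns of the underlying Spark DataFrame, and reassembles a new DataFrame.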
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def add(self, other):
return self + other
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: unlike pandas, it is required for `func` to specify a return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for column in self._metadata.data_columns:
applied.append(self[column].apply(func))
sdf = self._sdf.select(
self._metadata.index_columns + [c._scol for c in applied])
metadata = self._metadata.copy(data_columns=[c.name for c in applied])
return DataFrame(sdf, metadata)
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to False.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
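# The pandas-delegation pattern used here (and by to_html, to_string, to_dict,
# to_latex and to_records below) is, in sketch form:
#
#     args = locals()                  # capture the caller's keyword arguments first
#     pdf = self.to_pandas()           # collect the data to the driver as pandas
#     validate_arguments_and_invoke_function(
#         pdf, self.to_clipboard, pd.DataFrame.to_clipboard, args)
#
# i.e. the Koalas arguments are validated against the pandas signature and the
# call is forwarded to the corresponding pandas method on the collected frame.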
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._metadata.index_map) == 0:
return None
elif len(self._metadata.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._metadata.data_columns) == 0 or self._sdf.rdd.isEmpty()
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._metadata.data_columns if column not in keys]
else:
data_columns = self._metadata.data_columns
if append:
index_map = self._metadata.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])
if inplace:
self._metadata = metadata
self._sdf = sdf
else:
kdf = self.copy()
kdf._metadata = metadata
kdf._sdf = sdf
return kdf
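# Sketch of the metadata rewrite performed above for keys=['year', 'month']
# (illustrative column names):
#
#     append=False -> index_map = [('year', 'year'), ('month', 'month')]
#     append=True  -> index_map = old index_map + [('year', 'year'), ('month', 'month')]
#     drop=True    -> 'year' and 'month' are removed from data_columns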
def reset_index(self, level=None, drop=False, inplace=False):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
"""
# TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301
if len(self._metadata.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._metadata.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._metadata.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._metadata.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._metadata.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._metadata.index_map.copy()
for i in idx:
info = self._metadata.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
if drop:
new_index_map = []
metadata = self._metadata.copy(
data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._metadata.data_columns
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether it's
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def nunique(self, axis: int = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don't include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis != 0:
raise ValueError("The 'nunique' method only works with axis=0 at the moment")
count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct
if dropna:
res = self._sdf.select([count_fn(Column(c))
.alias(c)
for c in self.columns])
else:
res = self._sdf.select([(count_fn(Column(c))
# If the count of null values in a column is at least 1,
# increase the total count by 1 else 0. This is like adding
# self.isnull().sum().clip(upper=1) but can be computed in a
# single Spark job when pulling it into the select statement.
+ F.when(F.count(F.when(F.col(c).isNull(), 1).otherwise(None))
>= 1, 1).otherwise(0))
.alias(c)
for c in self.columns])
return res.toPandas().T.iloc[:, 0]
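# Sketch of the dropna=False adjustment built above for one column `c`
# (illustrative), with F = pyspark.sql.functions:
#
#     has_null = F.when(F.count(F.when(F.col(c).isNull(), 1).otherwise(None)) >= 1, 1).otherwise(0)
#     expr = (F.countDistinct(F.col(c)) + has_null).alias(c)
#
# countDistinct ignores nulls, so 1 is added back whenever the column contains at
# least one null; everything stays inside a single Spark job.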
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, which gets uncached after execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._sdf, self._metadata)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self._sdf.write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, options=options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self._sdf.write.parquet(path=path, mode=mode, partitionBy=partition_cols,
compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self._sdf.write.save(path=path, format=format, mode=mode, partitionBy=partition_cols,
options=options)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._internal.spark_df
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(
data_columns=(data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(sdf, metadata)
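# Sketch of the per-value handling in the loop above, where `name` is the new
# column label:
#
#     sdf = sdf.withColumn(name, value._scol)   # Koalas Series -> underlying Spark Column
#     sdf = sdf.withColumn(name, value)         # raw Spark Column
#     sdf = sdf.withColumn(name, F.lit(value))  # scalar literal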
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._sdf, self._metadata.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
sdf = self._sdf
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [c for c in self.columns
if isinstance(sdf.schema[c].dataType, numeric_types)]
nonnumeric_columns = [c for c in self.columns
if not isinstance(sdf.schema[c].dataType, numeric_types)]
if lower is not None:
sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
if upper is not None:
sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
# Restore initial column order
sdf = sdf.select(list(self.columns))
return ks.DataFrame(sdf)
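# Per-column sketch of the clip expressions assembled above for a numeric column
# `c` (illustrative), with F = pyspark.sql.functions:
#
#     F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)   # apply lower bound
#     F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)   # apply upper bound
#
# Non-numeric columns are passed through unchanged and the original column order
# is restored with a final select.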
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.data_columns)
@columns.setter
def columns(self, names):
old_names = self._metadata.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._metadata.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.data_columns],
index=self._metadata.data_columns)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr, numeric_only=False)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
metadata = self._metadata.copy(
data_columns=[column for column in self.columns if column not in columns]
)
return DataFrame(sdf, metadata)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
return None
else:
return kdf
def sort_index(self, axis: int = 0, level: int = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]]).sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if level is not None:
raise ValueError("The 'axis' argument is not supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
return self.sort_values(by=self._metadata.index_columns, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns as in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._metadata.index_columns
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self[col]._scol.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self[col]._scol.isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(left, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
1 baz 3 baz 7
2 foo 1 foo 5
3 foo 1 foo 8
4 foo 5 foo 5
5 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = left._metadata.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._metadata.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(left.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = left._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_key_columns = [left_table[col] for col in left_keys] # type: ignore
right_key_columns = [right_table[col] for col in right_keys] # type: ignore
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(left._metadata.data_columns)
& set(right._metadata.data_columns))
left_index_columns = set(left._metadata.index_columns)
right_index_columns = set(right._metadata.index_columns)
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = left_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = right_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
# Retain indices if they are used for joining
if left_index:
if right_index:
exprs.extend(['left_table.%s' % col for col in left_index_columns])
exprs.extend(['right_table.%s' % col for col in right_index_columns])
index_map = left._metadata.index_map + [idx for idx in right._metadata.index_map
if idx not in left._metadata.index_map]
else:
exprs.extend(['right_table.%s' % col for col in right_index_columns])
index_map = right._metadata.index_map
elif right_index:
exprs.extend(['left_table.%s' % col for col in left_index_columns])
index_map = left._metadata.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
# Merge left and right indices after the join by replacing missing values in the left index
# with values from the right index and dropping
if (how == 'right' or how == 'full') and right_index:
for left_index_col, right_index_col in zip(left._metadata.index_columns,
right._metadata.index_columns):
selected_columns = selected_columns.withColumn(
'left_table.' + left_index_col,
F.when(F.col('left_table.%s' % left_index_col).isNotNull(),
F.col('left_table.%s' % left_index_col))
.otherwise(F.col('right_table.%s' % right_index_col))
).withColumnRenamed(
'left_table.%s' % left_index_col, left_index_col
).drop(F.col('left_table.%s' % left_index_col))
if not(left_index and not right_index):
selected_columns = selected_columns.drop(*[F.col('right_table.%s' % right_index_col)
for right_index_col in right_index_columns
if right_index_col in left_index_columns])
if index_map:
data_columns = [c for c in selected_columns.columns
if c not in [idx[0] for idx in index_map]]
return DataFrame(selected_columns, index=Metadata(data_columns, index_map))
else:
return DataFrame(selected_columns)
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_columns = self._metadata.index_columns
if len(index_columns) != len(other._metadata.index_columns):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_columns) > 0:
if (self._sdf.select(index_columns)
.intersect(other._sdf.select(other._metadata.index_columns))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function using a named argument by specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(sdf, self._metadata.copy())
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._metadata.index_columns + list(map(lambda ser: ser._scol, results)))
return DataFrame(sdf, self._metadata.copy())
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(data_columns=[prefix + name for name in data_columns])
sdf = self._sdf.select(self._metadata.index_columns +
[self[name]._scol.alias(prefix + name)
for name in self._metadata.data_columns])
return DataFrame(sdf, metadata)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(data_columns=[name + suffix for name in data_columns])
sdf = self._sdf.select(self._metadata.index_columns +
[self[name]._scol.alias(name + suffix)
for name in self._metadata.data_columns])
return DataFrame(sdf, metadata)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
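# e.g. percentiles [0.25, 0.5, 0.75] are formatted as ['25%', '50%', '75%'],
# which is the label form Spark's summary() expects below.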
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
return DataFrame(sdf.replace("stddev", "std", subset='summary'),
index=Metadata(data_columns=data_columns,
index_map=[('summary', None)])).astype('float64')
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
Examples
--------
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._metadata.data_columns
elif not isinstance(subset, list):
subset = [subset]
sdf = self._sdf.drop_duplicates(subset=subset)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
try:
return Series(self._sdf.__getitem__(key), anchor=self,
index=self._metadata.index_map)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, str)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._sdf.filter(bcol), self._metadata.copy())
raise NotImplementedError(key)
def __repr__(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._sdf = kdf._sdf
self._metadata = kdf._metadata
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, sdf, metadata):
self._cached = sdf.cache()
super(_CachedDataFrame, self).__init__(self._cached, index=metadata)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
is not used with a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use the `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 10,128 | I think he meant the docstring in this constructor. yea we should fix | databricks-koalas | py |
@@ -150,7 +150,18 @@ public abstract class BinaryDictionary implements Dictionary {
throw new IllegalStateException("unknown resource scheme " + resourceScheme);
}
}
-
+
+ public static InputStream getResource(ResourceScheme scheme, String path) throws IOException {
+ switch(scheme) {
+ case CLASSPATH:
+ return getClassResource(path);
+ case FILE:
+ return Files.newInputStream(Paths.get(path));
+ default:
+ throw new IllegalStateException("unknown resource scheme " + scheme);
+ }
+ }
+
// util, reused by ConnectionCosts and CharacterDefinition
public static InputStream getClassResource(Class<?> clazz, String suffix) throws IOException {
final InputStream is = clazz.getResourceAsStream(clazz.getSimpleName() + suffix); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.ko.dict;
import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.lucene.analysis.ko.POS;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.InputStreamDataInput;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.IntsRef;
/**
* Base class for a binary-encoded in-memory dictionary.
*/
public abstract class BinaryDictionary implements Dictionary {
/**
* Used to specify where (dictionary) resources get loaded from.
*/
public enum ResourceScheme {
CLASSPATH, FILE
}
public static final String TARGETMAP_FILENAME_SUFFIX = "$targetMap.dat";
public static final String DICT_FILENAME_SUFFIX = "$buffer.dat";
public static final String POSDICT_FILENAME_SUFFIX = "$posDict.dat";
public static final String DICT_HEADER = "ko_dict";
public static final String TARGETMAP_HEADER = "ko_dict_map";
public static final String POSDICT_HEADER = "ko_dict_pos";
public static final int VERSION = 1;
private final ResourceScheme resourceScheme;
private final String resourcePath;
private final ByteBuffer buffer;
private final int[] targetMapOffsets, targetMap;
private final POS.Tag[] posDict;
protected BinaryDictionary() throws IOException {
this(ResourceScheme.CLASSPATH, null);
}
/**
* @param resourceScheme - scheme for loading resources (FILE or CLASSPATH).
* @param resourcePath - where to load resources (dictionaries) from. If null, with CLASSPATH scheme only, use
* this class's name as the path.
*/
protected BinaryDictionary(ResourceScheme resourceScheme, String resourcePath) throws IOException {
this.resourceScheme = resourceScheme;
if (resourcePath == null) {
if (resourceScheme != ResourceScheme.CLASSPATH) {
throw new IllegalArgumentException("resourcePath must be supplied with FILE resource scheme");
}
this.resourcePath = getClass().getName().replace('.', '/');
} else {
this.resourcePath = resourcePath;
}
InputStream mapIS = null, dictIS = null, posIS = null;
int[] targetMapOffsets, targetMap;
ByteBuffer buffer;
try {
mapIS = getResource(TARGETMAP_FILENAME_SUFFIX);
mapIS = new BufferedInputStream(mapIS);
DataInput in = new InputStreamDataInput(mapIS);
CodecUtil.checkHeader(in, TARGETMAP_HEADER, VERSION, VERSION);
targetMap = new int[in.readVInt()];
targetMapOffsets = new int[in.readVInt()];
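// Decode the delta-encoded target map: the low bit of each vint marks the start of a new
// sourceId block (recorded in targetMapOffsets); the remaining bits hold the delta that is
// accumulated into the next targetMap entry.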
int accum = 0, sourceId = 0;
for (int ofs = 0; ofs < targetMap.length; ofs++) {
final int val = in.readVInt();
if ((val & 0x01) != 0) {
targetMapOffsets[sourceId] = ofs;
sourceId++;
}
accum += val >>> 1;
targetMap[ofs] = accum;
}
if (sourceId + 1 != targetMapOffsets.length)
throw new IOException("targetMap file format broken; targetMap.length=" + targetMap.length
+ ", targetMapOffsets.length=" + targetMapOffsets.length
+ ", sourceId=" + sourceId);
targetMapOffsets[sourceId] = targetMap.length;
mapIS.close(); mapIS = null;
posIS = getResource(POSDICT_FILENAME_SUFFIX);
posIS = new BufferedInputStream(posIS);
in = new InputStreamDataInput(posIS);
CodecUtil.checkHeader(in, POSDICT_HEADER, VERSION, VERSION);
int posSize = in.readVInt();
posDict = new POS.Tag[posSize];
for (int j = 0; j < posSize; j++) {
posDict[j] = POS.resolveTag(in.readByte());
}
posIS.close(); posIS = null;
dictIS = getResource(DICT_FILENAME_SUFFIX);
// no buffering here, as we load in one large buffer
in = new InputStreamDataInput(dictIS);
CodecUtil.checkHeader(in, DICT_HEADER, VERSION, VERSION);
final int size = in.readVInt();
final ByteBuffer tmpBuffer = ByteBuffer.allocateDirect(size);
final ReadableByteChannel channel = Channels.newChannel(dictIS);
final int read = channel.read(tmpBuffer);
if (read != size) {
throw new EOFException("Cannot read whole dictionary");
}
dictIS.close(); dictIS = null;
buffer = tmpBuffer.asReadOnlyBuffer();
} finally {
IOUtils.closeWhileHandlingException(mapIS, posIS, dictIS);
}
this.targetMap = targetMap;
this.targetMapOffsets = targetMapOffsets;
this.buffer = buffer;
}
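// Illustrative example only: with the CLASSPATH scheme and a null resourcePath, a suffix such as
// DICT_FILENAME_SUFFIX resolves against the concrete subclass's binary name, e.g.
// org/apache/lucene/analysis/ko/dict/TokenInfoDictionary$buffer.dat.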
protected final InputStream getResource(String suffix) throws IOException {
switch(resourceScheme) {
case CLASSPATH:
return getClassResource(resourcePath + suffix);
case FILE:
return Files.newInputStream(Paths.get(resourcePath + suffix));
default:
throw new IllegalStateException("unknown resource scheme " + resourceScheme);
}
}
// util, reused by ConnectionCosts and CharacterDefinition
public static InputStream getClassResource(Class<?> clazz, String suffix) throws IOException {
final InputStream is = clazz.getResourceAsStream(clazz.getSimpleName() + suffix);
if (is == null) {
throw new FileNotFoundException("Not in classpath: " + clazz.getName().replace('.', '/') + suffix);
}
return is;
}
private InputStream getClassResource(String path) throws IOException {
final InputStream is = BinaryDictionary.class.getClassLoader().getResourceAsStream(path);
if (is == null) {
throw new FileNotFoundException("Not in classpath: " + path);
}
return is;
}
public void lookupWordIds(int sourceId, IntsRef ref) {
ref.ints = targetMap;
ref.offset = targetMapOffsets[sourceId];
// targetMapOffsets always has one more entry pointing behind last:
ref.length = targetMapOffsets[sourceId + 1] - ref.offset;
}
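// Entry layout assumed by the accessors below: short at wordId = leftId << 2 | POS type,
// short at wordId+2 = rightId << 2 | flags (HAS_SINGLE_POS, HAS_READING),
// short at wordId+4 = word cost, followed by optional POS/reading/morpheme data from wordId+6 onwards.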
@Override
public int getLeftId(int wordId) {
return buffer.getShort(wordId) >>> 2;
}
@Override
public int getRightId(int wordId) {
return buffer.getShort(wordId+2) >>> 2; // Skip left id
}
@Override
public int getWordCost(int wordId) {
return buffer.getShort(wordId + 4); // Skip left and right id
}
@Override
public POS.Type getPOSType(int wordId) {
byte value = (byte) (buffer.getShort(wordId) & 3);
return POS.resolveType(value);
}
@Override
public POS.Tag getLeftPOS(int wordId) {
return posDict[getLeftId(wordId)];
}
@Override
public POS.Tag getRightPOS(int wordId) {
POS.Type type = getPOSType(wordId);
if (type == POS.Type.MORPHEME || type == POS.Type.COMPOUND || hasSinglePOS(wordId)) {
return getLeftPOS(wordId);
} else {
byte value = buffer.get(wordId + 6);
return POS.resolveTag(value);
}
}
@Override
public String getReading(int wordId) {
if (hasReadingData(wordId)) {
int offset = wordId + 6;
return readString(offset);
}
return null;
}
@Override
public Morpheme[] getMorphemes(int wordId, char[] surfaceForm, int off, int len) {
POS.Type posType = getPOSType(wordId);
if (posType == POS.Type.MORPHEME) {
return null;
}
int offset = wordId + 6;
boolean hasSinglePos = hasSinglePOS(wordId);
if (hasSinglePos == false) {
offset++; // skip rightPOS
}
int length = buffer.get(offset++);
if (length == 0) {
return null;
}
Morpheme[] morphemes = new Morpheme[length];
int surfaceOffset = 0;
final POS.Tag leftPOS = getLeftPOS(wordId);
for (int i = 0; i < length; i++) {
final String form;
final POS.Tag tag = hasSinglePos ? leftPOS : POS.resolveTag(buffer.get(offset++));
if (posType == POS.Type.INFLECT) {
form = readString(offset);
offset += form.length() * 2 + 1;
} else {
int formLen = buffer.get(offset++);
form = new String(surfaceForm, off+surfaceOffset, formLen);
surfaceOffset += formLen;
}
morphemes[i] = new Morpheme(tag, form);
}
return morphemes;
}
private String readString(int offset) {
int strOffset = offset;
int len = buffer.get(strOffset++);
char[] text = new char[len];
for (int i = 0; i < len; i++) {
text[i] = buffer.getChar(strOffset + (i<<1));
}
return new String(text);
}
private boolean hasSinglePOS(int wordId) {
return (buffer.getShort(wordId+2) & HAS_SINGLE_POS) != 0;
}
private boolean hasReadingData(int wordId) {
return (buffer.getShort(wordId+2) & HAS_READING) != 0;
}
/** flag that the entry has a single part of speech (leftPOS) */
public static final int HAS_SINGLE_POS = 1;
/** flag that the entry has reading data. otherwise reading is surface form */
public static final int HAS_READING = 2;
} | 1 | 32,830 | so .. this basically follows the pattern from JapaneseTokenizer, I think. .. but somehow I don't see where we defined ResourceScheme? We're not referencing the one in kuromoji, right? | apache-lucene-solr | java |
@@ -525,8 +525,11 @@ namespace SkylineNightly
private void DownloadSkylineTester(string skylineTesterZip, RunMode mode)
{
- // Make sure we can negotiate with HTTPS servers that demand TLS 1.2 (default in dotNet 4.6, but has to be turned on in 4.5)
- ServicePointManager.SecurityProtocol |= (SecurityProtocolType.Tls | SecurityProtocolType.Tls11 | SecurityProtocolType.Tls12);
+ // The current recommendation from MSFT for future-proofing HTTPS https://docs.microsoft.com/en-us/dotnet/framework/network-programming/tls
+ // is don't specify TLS levels at all, let the OS decide. But we worry that this will mess up Win7 and Win8 installs, so we continue to specify explicitly
+ var Tls13 = (SecurityProtocolType)12288; // From decompiled SecurityProtocolType - compiler has no definition for some reason
+ ServicePointManager.SecurityProtocol |= SecurityProtocolType.Tls | SecurityProtocolType.Tls11 | SecurityProtocolType.Tls12 | Tls13;
+
using (var client = new WebClient())
{
client.Credentials = new NetworkCredential(TEAM_CITY_USER_NAME, TEAM_CITY_USER_PASSWORD); | 1 | /*
* Original author: Don Marsh <donmarsh .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2014 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Drawing.Imaging;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Net;
using System.Reflection;
using System.ServiceModel;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Windows.Forms;
using System.Xml.Linq;
using Ionic.Zip;
using Microsoft.Win32.TaskScheduler;
using SkylineNightly.Properties;
namespace SkylineNightly
{
// ReSharper disable LocalizableElement
public class Nightly
{
private const string NIGHTLY_TASK_NAME = "Skyline nightly build";
private const string TEAM_CITY_ZIP_URL = "https://teamcity.labkey.org/guestAuth/repository/download/{0}/.lastFinished/SkylineTester.zip{1}";
private const string TEAM_CITY_BUILD_TYPE_64_MASTER = "bt209";
// N.B. choice of "release" and "integration" branches is made in TeamCity VCS Roots "pwiz Github Skyline_Integration_Only" and "pwiz Github Skyline_Release_Only"
// Thus TC admins can easily change the "release" and "integration" git branches at http://teamcity.labkey.org/admin/editProject.html?projectId=ProteoWizard&tab=projectVcsRoots
private const string TEAM_CITY_BUILD_TYPE_64_RELEASE = "ProteoWizard_WindowsX8664SkylineReleaseBranchMsvcProfessional";
private const string TEAM_CITY_BUILD_TYPE_64_INTEGRATION = "ProteoWizard_SkylineIntegrationBranchX8664";
private const string TEAM_CITY_USER_NAME = "guest";
private const string TEAM_CITY_USER_PASSWORD = "guest";
private const string LABKEY_PROTOCOL = "https";
private const string LABKEY_SERVER_ROOT = "skyline.ms";
private const string LABKEY_MODULE = "testresults";
private const string LABKEY_ACTION = "post";
private const string LABKEY_EMAIL_NOTIFICATION_ACTION = "sendEmailNotification";
private static string GetPostUrl(string path)
{
return GetUrl(path, LABKEY_MODULE, LABKEY_ACTION);
}
private static string GetUrl(string path, string controller, string action)
{
return LABKEY_PROTOCOL + "://" + LABKEY_SERVER_ROOT + "/" + controller + "/" + path + "/" +
action + ".view";
}
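// e.g. GetPostUrl("home/development/Nightly%20x64") builds
// https://skyline.ms/testresults/home/development/Nightly%20x64/post.view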
private static string LABKEY_URL = GetPostUrl("home/development/Nightly%20x64");
private static string LABKEY_PERF_URL = GetPostUrl("home/development/Performance%20Tests");
private static string LABKEY_STRESS_URL = GetPostUrl("home/development/NightlyStress");
private static string LABKEY_RELEASE_URL = GetPostUrl("home/development/Release%20Branch");
private static string LABKEY_RELEASE_PERF_URL = GetPostUrl("home/development/Release%20Branch%20Performance%20Tests");
private static string LABKEY_INTEGRATION_URL = GetPostUrl("home/development/Integration");
private static string LABKEY_INTEGRATION_PERF_URL = GetPostUrl("home/development/Integration%20With%20Perf%20Tests");
private static string LABKEY_HOME_URL = GetUrl("home", "project", "begin");
public static string LABKEY_EMAIL_NOTIFICATION_URL = GetUrl("home/development/Nightly%20x64", LABKEY_MODULE, LABKEY_EMAIL_NOTIFICATION_ACTION);
private static string LABKEY_CSRF = @"X-LABKEY-CSRF";
private const string GIT_MASTER_URL = "https://github.com/ProteoWizard/pwiz";
private const string GIT_BRANCHES_URL = GIT_MASTER_URL + "/tree/";
private DateTime _startTime;
public string LogFileName { get; private set; }
private readonly Xml _nightly;
private readonly Xml _failures;
private readonly Xml _leaks;
private Xml _pass;
private readonly string _logDir;
private readonly RunMode _runMode;
private string PwizDir
{
get
{
// Place source code in SkylineTester instead of next to it, so we can
// still proceed if the source tree is locked against delete for any reason
return Path.Combine(_skylineTesterDir, "pwiz");
}
}
private string _skylineTesterDir;
public const int DEFAULT_DURATION_HOURS = 9;
public const int PERF_DURATION_HOURS = 12;
public Nightly(RunMode runMode, string decorateSrcDirName = null)
{
_runMode = runMode;
_nightly = new Xml("nightly");
_failures = _nightly.Append("failures");
_leaks = _nightly.Append("leaks");
// Locate relevant directories.
var nightlyDir = GetNightlyDir();
_logDir = Path.Combine(nightlyDir, "Logs");
// Clean up after any old screengrab directories
var logDirScreengrabs = Path.Combine(_logDir, "NightlyScreengrabs");
if (Directory.Exists(logDirScreengrabs))
Directory.Delete(logDirScreengrabs, true);
// First guess at working directory - distinguish between run types for machines that do double duty
_skylineTesterDir = Path.Combine(nightlyDir, "SkylineTesterForNightly_"+runMode + (decorateSrcDirName ?? string.Empty));
}
public static string NightlyTaskName { get { return NIGHTLY_TASK_NAME; } }
public static string NightlyTaskNameWithUser { get { return string.Format("{0} ({1})", NIGHTLY_TASK_NAME, Environment.UserName);} }
public static Task NightlyTask
{
get
{
using (var ts = new TaskService())
return ts.FindTask(NightlyTaskName) ?? ts.FindTask(NightlyTaskNameWithUser);
}
}
public bool WithPerfTests => _runMode != RunMode.trunk && _runMode != RunMode.integration && _runMode != RunMode.release;
public TimeSpan TargetDuration
{
get
{
if (_runMode == RunMode.stress)
{
return TimeSpan.FromHours(168); // Let it go as long as a week
}
else if (WithPerfTests)
{
return TimeSpan.FromHours(PERF_DURATION_HOURS); // Let it go a bit longer than standard 9 hours
}
return TimeSpan.FromHours(DEFAULT_DURATION_HOURS);
}
}
public void Finish(string message, string errMessage)
{
// Leave a note for the user, in a way that won't interfere with our next run
Log("Done. Exit message: ");
Log(!string.IsNullOrEmpty(message) ? message : "none");
if (!string.IsNullOrEmpty(errMessage))
Log(errMessage);
if (string.IsNullOrEmpty(LogFileName))
{
if (!string.IsNullOrEmpty(message))
{
MessageBox.Show(message, @"SkylineNightly Help");
}
}
else
{
var process = new Process
{
StartInfo =
{
FileName = "notepad.exe",
Arguments = LogFileName
}
};
process.Start();
}
}
public enum RunMode { parse, post, trunk, perf, release, stress, integration, release_perf, integration_perf }
private string SkylineTesterStoppedByUser = "SkylineTester stopped by user";
public string RunAndPost()
{
var runResult = Run() ?? string.Empty;
if (runResult.Equals(SkylineTesterStoppedByUser))
{
Log("No results posted");
return runResult;
}
Parse();
var postResult = Post(_runMode);
if (!string.IsNullOrEmpty(postResult))
{
if (!string.IsNullOrEmpty(runResult))
runResult += "\n";
runResult += postResult;
}
return runResult;
}
/// <summary>
/// Run nightly build/test and report results to server.
/// </summary>
public string Run()
{
string result = string.Empty;
// Locate relevant directories.
var nightlyDir = GetNightlyDir();
var skylineNightlySkytr = Path.Combine(nightlyDir, "SkylineNightly.skytr");
// Kill any other instance of SkylineNightly, unless this is
// the StressTest mode, in which case assume that a previous invocation
// is still running and just exit to stay out of its way.
foreach (var process in Process.GetProcessesByName("skylinenightly"))
{
if (process.Id != Process.GetCurrentProcess().Id)
{
if (_runMode == RunMode.stress)
{
Application.Exit(); // Just let the already (long!) running process do its thing
}
else
{
process.Kill();
}
}
}
// Kill processes started within the proposed working directory - most likely SkylineTester and/or TestRunner.
// This keeps stuck tests around for 24 hours, which should be sufficient, but allows us to replace directory
// on a daily basis - otherwise we could fill the hard drive on smaller machines
foreach (var process in Process.GetProcesses())
{
try
{
if (process.Modules[0].FileName.StartsWith(_skylineTesterDir) &&
process.Id != Process.GetCurrentProcess().Id)
{
process.Kill();
}
}
// ReSharper disable once EmptyGeneralCatchClause
catch (Exception)
{
}
}
// Create place to put run logs
if (!Directory.Exists(_logDir))
Directory.CreateDirectory(_logDir);
// Start the nightly log file
StartLog(_runMode);
// Delete source tree and old SkylineTester.
Delete(skylineNightlySkytr);
Log("Delete SkylineTester");
var skylineTesterDirBasis = _skylineTesterDir; // Default name
const int maxRetry = 1000; // Something would have to be very wrong to get here, but better not to risk a hang
string nextDir = _skylineTesterDir;
for (var retry = 1; retry < maxRetry; retry++)
{
try
{
if (!Directory.Exists(nextDir))
break;
string deleteDir = nextDir;
// Keep going until a directory is found that does not exist
nextDir = skylineTesterDirBasis + "_" + retry;
Delete(deleteDir);
}
catch (Exception e)
{
if (Directory.Exists(_skylineTesterDir))
{
// Work around undeletable file that sometimes appears under Windows 10
Log("Unable to delete " + _skylineTesterDir + "(" + e + "), using " + nextDir + " instead.");
_skylineTesterDir = nextDir;
}
}
}
Log("buildRoot is " + PwizDir);
// We used to put source tree alongside SkylineTesterDir instead of under it, delete that too
try
{
Delete(Path.Combine(nightlyDir, "pwiz"));
}
// ReSharper disable once EmptyGeneralCatchClause
catch
{
}
Directory.CreateDirectory(_skylineTesterDir);
// Download most recent build of SkylineTester.
var skylineTesterZip = Path.Combine(_skylineTesterDir, skylineTesterDirBasis + ".zip");
const int attempts = 30;
string branchUrl = null;
for (int i = 0; i < attempts; i++)
{
try
{
DownloadSkylineTester(skylineTesterZip, _runMode);
}
catch (Exception ex)
{
Log("Exception while downloading SkylineTester: " + ex.Message + " (Probably still being built, will retry every 60 seconds for 30 minutes.)");
if (i == attempts-1)
{
LogAndThrow("Unable to download SkylineTester");
}
Thread.Sleep(60*1000); // one minute
continue;
}
// Install SkylineTester.
if (!InstallSkylineTester(skylineTesterZip, _skylineTesterDir))
LogAndThrow("SkylineTester installation failed.");
try
{
// Delete zip file.
Log("Delete zip file " + skylineTesterZip);
File.Delete(skylineTesterZip);
// Figure out which branch we're working in - there's a file in the downloaded SkylineTester zip that tells us.
var branchLine = File.ReadAllLines(Path.Combine(_skylineTesterDir, "SkylineTester Files", "Version.cpp")).FirstOrDefault(l => l.Contains("Version::Branch"));
if (!string.IsNullOrEmpty(branchLine))
{
// Looks like std::string Version::Branch() {return "Skyline/skyline_9_7";}
var branch = branchLine.Split(new[] { "\"" }, StringSplitOptions.None)[1];
if (branch.Equals("master"))
{
branchUrl = GIT_MASTER_URL;
}
else
{
branchUrl = GIT_BRANCHES_URL + branch; // Looks like https://github.com/ProteoWizard/pwiz/tree/Skyline/skyline_9_7
}
}
break;
}
catch (Exception ex)
{
Log("Exception while unzipping SkylineTester: " + ex.Message + " (Probably still being built, will retry every 60 seconds for 30 minutes.)");
if (i == attempts - 1)
{
LogAndThrow("Unable to identify branch from Version.cpp in SkylineTester");
}
Thread.Sleep(60 * 1000); // one minute
}
}
// Create ".skytr" file to execute nightly build in SkylineTester.
var assembly = Assembly.GetExecutingAssembly();
const string resourceName = "SkylineNightly.SkylineNightly.skytr";
double durationHours;
using (var stream = assembly.GetManifestResourceStream(resourceName))
{
if (stream == null)
{
LogAndThrow(result = "Embedded resource is broken");
return result;
}
using (var reader = new StreamReader(stream))
{
var skylineTester = Xml.FromString(reader.ReadToEnd());
skylineTester.GetChild("nightlyStartTime").Set(DateTime.Now.ToShortTimeString());
skylineTester.GetChild("nightlyRoot").Set(nightlyDir);
skylineTester.GetChild("buildRoot").Set(_skylineTesterDir);
skylineTester.GetChild("nightlyRunPerfTests").Set(WithPerfTests ? "true" : "false");
skylineTester.GetChild("nightlyDuration").Set(((int)TargetDuration.TotalHours).ToString());
skylineTester.GetChild("nightlyRepeat").Set(_runMode == RunMode.stress ? "100" : "1");
skylineTester.GetChild("nightlyRandomize").Set(_runMode == RunMode.stress ? "true" : "false");
if (!string.IsNullOrEmpty(branchUrl) && branchUrl.Contains("tree"))
{
skylineTester.GetChild("nightlyBuildTrunk").Set("false");
skylineTester.GetChild("nightlyBranch").Set("true");
skylineTester.GetChild("nightlyBranchUrl").Set(branchUrl);
Log("Testing branch at " + branchUrl);
}
skylineTester.Save(skylineNightlySkytr);
durationHours = double.Parse(skylineTester.GetChild("nightlyDuration").Value);
}
}
// Start SkylineTester to do the build.
var skylineTesterExe = Path.Combine(_skylineTesterDir, "SkylineTester Files", "SkylineTester.exe");
Log(string.Format("Starting {0} with config file {1}, which contains:", skylineTesterExe, skylineNightlySkytr));
foreach (var line in File.ReadAllLines(skylineNightlySkytr))
{
Log(line);
}
var processInfo = new ProcessStartInfo(skylineTesterExe, skylineNightlySkytr)
{
WorkingDirectory = Path.GetDirectoryName(skylineTesterExe) ?? ""
};
bool retryTester;
const int maxRetryMinutes = 60;
var logMonitor = new LogFileMonitor(_logDir, LogFileName, _runMode);
logMonitor.Start();
do
{
var skylineTesterProcess = Process.Start(processInfo);
if (skylineTesterProcess == null)
{
LogAndThrow(result = "SkylineTester did not start");
return result;
}
Log("SkylineTester started");
// Calculate end time: convert to UTC, add the duration, then convert back to local time.
// Conversion to UTC before adding the duration avoids DST issues.
var endTime = skylineTesterProcess.StartTime.ToUniversalTime().AddHours(durationHours).ToLocalTime();
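                // Illustrative sketch (hypothetical times, not from a real run): with a 9 hour
                // duration and a local start of 2021-11-06 21:00 PDT, adding hours to the local
                // clock directly would give 06:00 the next day (10 real hours, since clocks fall
                // back); going through UTC (04:00 UTC + 9h = 13:00 UTC) gives 05:00 PST instead.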
var originalEndTime = endTime;
for (;; Thread.Sleep(1000))
{
if (skylineTesterProcess.HasExited)
{
if (skylineTesterProcess.ExitCode == 0xDEAD)
{
// User killed, don't post
Log(result = SkylineTesterStoppedByUser);
return result;
}
Log("SkylineTester finished");
break;
}
else if (DateTime.Now > endTime.AddMinutes(30)) // 30 minutes grace before we kill SkylineTester
{
SaveErrorScreenshot();
Log(result = "SkylineTester has exceeded its " + durationHours + " hour runtime. You should investigate.");
break;
}
if (endTime == originalEndTime)
{
if (logMonitor.IsHang)
{
var now = DateTime.Now;
if (9 <= now.Hour && now.Hour < 17)
{
// between 9am-5pm, set end time to 4 hours from now (unless scheduled end is already 4+ hours from now)
var newEndTime = DateTime.Now.AddHours(4);
if (newEndTime > originalEndTime && SetEndTime(newEndTime))
endTime = newEndTime;
}
else
{
// extend the end time until 12pm to give us more time to attach a debugger
var newEndTime = originalEndTime.AddHours(16);
newEndTime = new DateTime(newEndTime.Year, newEndTime.Month, newEndTime.Day, 12, 0, 0);
if (SetEndTime(newEndTime))
endTime = newEndTime;
}
}
}
else if (!logMonitor.IsHang)
{
// If we get here, we've already extended the end time due to a hang and log file is now being modified again.
DateTime newEndTime;
if (logMonitor.IsDebugger)
{
// Assume that the log file is being modified because someone has taken manual action, and extend the end time further
// to prevent SkylineTester from being killed while someone is looking at it.
newEndTime = originalEndTime.AddDays(2);
}
else
{
// SkylineTester continued without a debugger being attached. Restore original end time.
newEndTime = originalEndTime;
var min = DateTime.Now.AddMinutes(1);
if (newEndTime <= min)
newEndTime = min;
}
if (endTime != newEndTime && SetEndTime(newEndTime))
endTime = newEndTime;
}
}
var actualDuration = DateTime.UtcNow - skylineTesterProcess.StartTime.ToUniversalTime();
retryTester = actualDuration.TotalMinutes < maxRetryMinutes;
if (retryTester)
{
// Retry a very short test run if there is no log file or the log file does not contain any tests
string logFile = GetLatestLog();
if (logFile != null && File.Exists(logFile))
retryTester = ParseTests(File.ReadAllText(logFile), false) == 0;
if (retryTester)
{
Log("No tests run in " + Math.Round(actualDuration.TotalMinutes) + " minutes retrying.");
}
}
}
while (retryTester);
logMonitor.Stop();
return result;
}
public void StartLog(RunMode runMode)
{
_startTime = DateTime.Now;
// Create log file.
LogFileName = Path.Combine(_logDir, string.Format("SkylineNightly-{0}-{1}.log", runMode, _startTime.ToString("yyyy-MM-dd-HH-mm", CultureInfo.InvariantCulture)));
Log(_startTime.ToShortDateString());
}
private void DownloadSkylineTester(string skylineTesterZip, RunMode mode)
{
// Make sure we can negotiate with HTTPS servers that demand TLS 1.2 (default in dotNet 4.6, but has to be turned on in 4.5)
ServicePointManager.SecurityProtocol |= (SecurityProtocolType.Tls | SecurityProtocolType.Tls11 | SecurityProtocolType.Tls12);
using (var client = new WebClient())
{
client.Credentials = new NetworkCredential(TEAM_CITY_USER_NAME, TEAM_CITY_USER_PASSWORD);
var isRelease = ((mode == RunMode.release) || (mode == RunMode.release_perf));
var isIntegration = mode == RunMode.integration || mode == RunMode.integration_perf;
var branchType = (isRelease||isIntegration) ? "" : "?branch=master"; // TC has a config just for release branch, and another for integration branch, but main config builds pull requests, other branches etc
var buildType = isIntegration ? TEAM_CITY_BUILD_TYPE_64_INTEGRATION : isRelease ? TEAM_CITY_BUILD_TYPE_64_RELEASE : TEAM_CITY_BUILD_TYPE_64_MASTER;
string zipFileLink = string.Format(TEAM_CITY_ZIP_URL, buildType, branchType);
Log("Download SkylineTester zip file as " + zipFileLink);
client.DownloadFile(zipFileLink, skylineTesterZip); // N.B. depending on caller to do try/catch
}
}
private bool InstallSkylineTester(string skylineTesterZip, string skylineTesterDir)
{
using (var zipFile = new ZipFile(skylineTesterZip))
{
try
{
Log("Unzip SkylineTester");
zipFile.ExtractAll(skylineTesterDir, ExtractExistingFileAction.OverwriteSilently);
}
catch (Exception e)
{
Log("Error attempting to unzip SkylineTester: " + e);
return false;
}
}
return true;
}
public RunMode Parse(string logFile = null, bool parseOnlyNoXmlOut = false)
{
if (logFile == null)
logFile = GetLatestLog();
if (logFile == null || !File.Exists(logFile))
throw new Exception(string.Format("cannot locate {0}", logFile ?? "current log"));
var log = File.ReadAllText(logFile);
var parsedDuration = TargetDuration;
// Extract log start time from log contents
var reStartTime = new Regex(@"\n\# Nightly started (.*)\r\n", RegexOptions.Compiled); // As in "# Nightly started Thursday, May 12, 2016 8:00 PM"
var reStoppedTime = new Regex(@"\n\# Stopped (.*)\r\n");
var stMatch = reStartTime.Match(log);
if (stMatch.Success)
{
var dateTimeStr = stMatch.Groups[1].Value;
if (DateTime.TryParse(dateTimeStr, out _startTime))
{
_startTime = DateTime.SpecifyKind(_startTime, DateTimeKind.Local);
}
}
var endMatch = reStoppedTime.Match(log);
if (endMatch.Success)
{
var dateTimeEnd = endMatch.Groups[1].Value;
if (DateTime.TryParse(dateTimeEnd, out var endTime))
{
endTime = DateTime.SpecifyKind(endTime, DateTimeKind.Local);
parsedDuration = endTime.ToUniversalTime() - _startTime.ToUniversalTime();
}
}
// Extract all test lines.
var testCount = ParseTests(log);
// Extract failures.
ParseFailures(log);
// Extract leaks.
ParseLeaks(log);
var hasPerftests = log.Contains("# Perf tests");
var isIntegration = new Regex(@"git\.exe.*clone.*-b").IsMatch(log);
var isTrunk = !isIntegration && !log.Contains("Testing branch at");
var machineName = Environment.MachineName;
// Get machine name from logfile name, in case it's not from this machine
var reMachineName = new Regex(@"(.*)_\d+\-\d+\-\d+_\d+\-\d+\-\d+\.\w+", RegexOptions.Compiled); // As in "NATBR-LAB-PC_2016-05-12_20-00-19.log"
var mnMatch = reMachineName.Match(Path.GetFileName(logFile));
if (mnMatch.Success)
{
machineName = mnMatch.Groups[1].Value.ToUpperInvariant();
}
// See if we can parse revision info from the log
string revisionInfo = null;
string gitHash = null;
// Checked out revision 9708.
var reRevision = new Regex(@"\nChecked out revision (.*)\.\r\n", RegexOptions.Compiled); // As in "Checked out revision 9708."
var revMatch = reRevision.Match(log);
if (revMatch.Success)
{
revisionInfo = revMatch.Groups[1].Value;
gitHash = "(svn)";
}
else // Look for log message where we emit our build ID
{
// look for build message like "ProteoWizard 3.0.18099.a0147f2 x64 AMD64"
reRevision = new Regex(@"\nProteoWizard \d+\.\d+\.([^ ]*)\.([^ ]*).*\r\n", RegexOptions.Compiled);
revMatch = reRevision.Match(log);
if (revMatch.Success)
{
revisionInfo = revMatch.Groups[1].Value;
gitHash = revMatch.Groups[2].Value;
}
}
_nightly["id"] = machineName;
_nightly["os"] = Environment.OSVersion;
var buildroot = ParseBuildRoot(log);
_nightly["revision"] = revisionInfo ?? GetRevision(buildroot);
_nightly["git_hash"] = gitHash ?? string.Empty;
_nightly["start"] = _startTime;
int durationMinutes = (int)parsedDuration.TotalMinutes;
// Round down or up by 1 minute to report even hours in this common case
if (durationMinutes % 60 == 1)
durationMinutes--;
else if (durationMinutes % 60 == 59)
durationMinutes++;
_nightly["duration"] = durationMinutes;
_nightly["testsrun"] = testCount;
_nightly["failures"] = _failures.Count;
_nightly["leaks"] = _leaks.Count;
// Save XML file.
if (!parseOnlyNoXmlOut)
{
var xmlFile = Path.ChangeExtension(logFile, ".xml");
File.WriteAllText(xmlFile, _nightly.ToString());
}
return isTrunk
? (hasPerftests ? RunMode.perf : RunMode.trunk)
: (isIntegration ? (hasPerftests ? RunMode.integration_perf : RunMode.integration) : (hasPerftests ? RunMode.release_perf : RunMode.release));
}
private class TestLogLineProperties
{
private enum EndType { heaps, handles, old, none }
private static Regex END_TEST_OLD = new Regex(@" \d+ failures, ([\.\d]+)/([\.\d]+) MB, (\d+) sec\.\r\n", RegexOptions.Compiled);
private static Regex END_TEST_HANDLES = new Regex(@" \d+ failures, ([\.\d]+)/([\.\d]+) MB, ([\.\d]+)/([\.\d]+) handles, (\d+) sec\.\r\n", RegexOptions.Compiled);
private static Regex END_TEST_HEAPS = new Regex(@" \d+ failures, ([\.\d]+)/([\.\d]+)/([\.\d]+) MB, ([\.\d]+)/([\.\d]+) handles, (\d+) sec\.\r\n", RegexOptions.Compiled);
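            // Illustrative (hypothetical) log lines each format is intended to match, newest first:
            //   heaps:   " 0 failures, 55.1/120.3/270.2 MB, 1263/1992 handles, 3 sec."
            //   handles: " 0 failures, 55.1/270.2 MB, 1263/1992 handles, 3 sec."
            //   old:     " 0 failures, 55.1/270.2 MB, 3 sec."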
private Match _startMatch;
private Match _endMatch;
private EndType _endMatchType;
public TestLogLineProperties(Match startMatch, string log)
{
_startMatch = startMatch;
_endMatchType = FindEndMatch(log);
}
private EndType FindEndMatch(string log)
{
// Enumerate through possible formats, starting with the most recent first
var regexes = new[] { END_TEST_HEAPS, END_TEST_HANDLES, END_TEST_OLD };
for (int i = 0; i < regexes.Length; i++)
{
var match = regexes[i].Match(log, _startMatch.Index);
if (match.Success)
{
_endMatch = match;
return (EndType) i;
}
}
return EndType.none;
}
public string Timestamp { get { return _startMatch.Groups[1].Value; } }
public string PassId { get { return _startMatch.Groups[2].Value; } }
public string TestId { get { return _startMatch.Groups[3].Value; } }
public string Name { get { return _startMatch.Groups[4].Value; } }
public string Language { get { return _startMatch.Groups[5].Value; } }
public string Managed { get { return _endMatch.Groups[1].Value; } }
public string Heaps { get { return _endMatchType == EndType.heaps ? _endMatch.Groups[2].Value : null; } }
public string Total { get { return _endMatch.Groups[_endMatchType == EndType.heaps ? 3 : 2].Value; } }
public string UserGdiHandles { get { return _endMatchType != EndType.old ? _endMatch.Groups[_endMatch.Groups.Count-3].Value : null; } }
public string TotalHandles { get { return _endMatchType != EndType.old ? _endMatch.Groups[_endMatch.Groups.Count-2].Value : null; } }
public string Duration { get { return _endMatch.Groups[_endMatch.Groups.Count-1].Value; } }
public bool IsEnded
{
get
{
return _endMatchType != EndType.none &&
!string.IsNullOrEmpty(Managed) &&
!string.IsNullOrEmpty(Total) &&
!string.IsNullOrEmpty(Duration);
}
}
}
private int ParseTests(string log, bool storeXml = true)
{
var startTest = new Regex(@"\r\n\[(\d\d:\d\d)\] +(\d+).(\d+) +(\S+) +\((\w\w)\) ", RegexOptions.Compiled);
string lastPass = null;
int testCount = 0;
for (var startMatch = startTest.Match(log); startMatch.Success; startMatch = startMatch.NextMatch())
{
var lineProperties = new TestLogLineProperties(startMatch, log);
if (!lineProperties.IsEnded)
continue;
if (lastPass != lineProperties.PassId)
{
lastPass = lineProperties.PassId;
if (storeXml)
{
_pass = _nightly.Append("pass");
_pass["id"] = lineProperties.PassId;
}
}
if (storeXml)
{
var test = _pass.Append("test");
test["id"] = lineProperties.TestId;
test["name"] = lineProperties.Name;
test["language"] = lineProperties.Language;
test["timestamp"] = lineProperties.Timestamp;
test["duration"] = lineProperties.Duration;
test["managed"] = lineProperties.Managed;
if (!string.IsNullOrEmpty(lineProperties.Heaps))
test["committed"] = lineProperties.Heaps;
test["total"] = lineProperties.Total;
if (!string.IsNullOrEmpty(lineProperties.UserGdiHandles))
test["user_gdi"] = lineProperties.UserGdiHandles;
if (!string.IsNullOrEmpty(lineProperties.TotalHandles))
test["handles"] = lineProperties.TotalHandles;
}
testCount++;
}
return testCount;
}
private void ParseFailures(string log)
{
var startFailure = new Regex(@"\r\n!!! (\S+) FAILED\r\n", RegexOptions.Compiled);
var endFailure = new Regex(@"\r\n!!!\r\n", RegexOptions.Compiled);
var failureTest = new Regex(@"\r\n\[(\d\d:\d\d)\] +(\d+).(\d+) +(\S+)\s+\(+(\S+)\)",
RegexOptions.Compiled | RegexOptions.RightToLeft);
for (var startMatch = startFailure.Match(log); startMatch.Success; startMatch = startMatch.NextMatch())
{
var name = startMatch.Groups[1].Value;
var endMatch = endFailure.Match(log, startMatch.Index);
var failureTestMatch = failureTest.Match(log, startMatch.Index);
var timestamp = failureTestMatch.Groups[1].Value;
var passId = failureTestMatch.Groups[2].Value;
var testId = failureTestMatch.Groups[3].Value;
var language = failureTestMatch.Groups[5].Value;
if (string.IsNullOrEmpty(passId) || string.IsNullOrEmpty(testId))
continue;
var failureDescription = log.Substring(startMatch.Index + startMatch.Length,
endMatch.Index - startMatch.Index - startMatch.Length);
var failure = _failures.Append("failure");
failure["name"] = name;
failure["timestamp"] = timestamp;
failure["pass"] = passId;
failure["test"] = testId;
failure["language"] = language;
failure.Set(Environment.NewLine + failureDescription + Environment.NewLine);
}
}
private void ParseLeaks(string log)
{
// Leaks in Private Bytes
var leakPattern = new Regex(@"!!! (\S+) LEAKED ([0-9.]+) bytes", RegexOptions.Compiled);
for (var match = leakPattern.Match(log); match.Success; match = match.NextMatch())
{
var leak = _leaks.Append("leak");
leak["name"] = match.Groups[1].Value;
leak["bytes"] = match.Groups[2].Value;
}
// Leaks in Process and Managed Heaps
var leakTypePattern = new Regex(@"!!! (\S+) LEAKED ([0-9.]+) ([^ ]*) bytes", RegexOptions.Compiled);
for (var match = leakTypePattern.Match(log); match.Success; match = match.NextMatch())
{
var leak = _leaks.Append("leak");
leak["name"] = match.Groups[1].Value;
leak["bytes"] = match.Groups[2].Value;
leak["type"] = match.Groups[3].Value;
}
// Handle leaks
var leakHandlesPattern = new Regex(@"!!! (\S+) HANDLE-LEAKED ([.0-9]+) (\S+)", RegexOptions.Compiled);
for (var match = leakHandlesPattern.Match(log); match.Success; match = match.NextMatch())
{
var leak = _leaks.Append("leak");
leak["name"] = match.Groups[1].Value;
leak["handles"] = match.Groups[2].Value;
leak["type"] = match.Groups[3].Value;
}
}
private string ParseBuildRoot(string log)
{
// Look for: > "C:\Program Files\Git\cmd\git.exe" clone "https://github.com/ProteoWizard/pwiz" "C:\Nightly\SkylineTesterForNightly_trunk\pwiz"
var brPattern = new Regex(@".*"".*git\.exe"" clone "".*"" ""(.*)""", RegexOptions.Compiled);
var match = brPattern.Match(log);
if (!match.Success)
{
brPattern = new Regex(@"Deleting Build directory\.\.\.\r\n\> rmdir /s ""(\S+)""", RegexOptions.Compiled);
match = brPattern.Match(log);
}
if (match.Success)
{
return match.Groups[1].Value;
}
return PwizDir;
}
/// <summary>
/// Post the latest results to the server.
/// </summary>
public string Post(RunMode mode, string xmlFile = null)
{
if (xmlFile == null)
{
xmlFile = GetLatestLog(); // Change extension to .xml below
if (xmlFile == null)
return string.Empty;
xmlFile = Path.ChangeExtension(xmlFile, ".xml"); // In case it's actually the log file name
}
var xml = File.ReadAllText(xmlFile);
var logFile = GetLatestLog();
if (logFile != null)
{
var log = File.ReadAllText(logFile);
XDocument doc = XDocument.Parse(xml);
if (doc.Root != null)
{
doc.Root.Add(new XElement("Log", log));
xml = doc.ToString();
}
}
string url;
// Post to server.
if (mode == RunMode.integration)
url = LABKEY_INTEGRATION_URL;
else if (mode == RunMode.integration_perf)
url = LABKEY_INTEGRATION_PERF_URL;
else if (mode == RunMode.release_perf)
url = LABKEY_RELEASE_PERF_URL;
else if (mode == RunMode.release)
url = LABKEY_RELEASE_URL;
else if (mode == RunMode.perf)
url = LABKEY_PERF_URL;
else if (mode == RunMode.stress)
url = LABKEY_STRESS_URL;
else
url = LABKEY_URL;
var result = PostToLink(url, xml, xmlFile);
var resultParts = result.ToLower().Split(':');
if (resultParts.Length == 2 && resultParts[0].Contains("success") && resultParts[1].Contains("true"))
result = string.Empty;
return result;
}
public string GetLatestLog()
{
return GetLatestLog(_logDir);
}
public static string GetLatestLog(string logDir)
{
var directory = new DirectoryInfo(logDir);
var logFile = directory.GetFiles()
.Where(f => f.Name.StartsWith(Environment.MachineName) && f.Name.EndsWith(".log"))
.OrderByDescending(f => f.LastWriteTime)
.FirstOrDefault();
return logFile == null ? null : logFile.FullName;
}
/// <summary>
/// Post data to the given link URL.
/// </summary>
private string PostToLink(string link, string postData, string filePath)
{
var errmessage = string.Empty;
Log("Posting results to " + link);
for (var retry = 5; retry > 0; retry--)
{
string boundary = "---------------------------" + DateTime.Now.Ticks.ToString("x");
byte[] boundarybytes = Encoding.ASCII.GetBytes("\r\n--" + boundary + "\r\n");
var wr = (HttpWebRequest)WebRequest.Create(link);
wr.ProtocolVersion = HttpVersion.Version10;
wr.ContentType = "multipart/form-data; boundary=" + boundary;
wr.Method = "POST";
wr.KeepAlive = true;
wr.Credentials = CredentialCache.DefaultCredentials;
SetCSRFToken(wr, LogFileName);
var rs = wr.GetRequestStream();
rs.Write(boundarybytes, 0, boundarybytes.Length);
const string headerTemplate = "Content-Disposition: form-data; name=\"{0}\"; filename=\"{1}\"\r\nContent-Type: {2}\r\n\r\n";
string header = string.Format(headerTemplate, "xml_file", filePath != null ? Path.GetFileName(filePath) : "xml_file", "text/xml");
byte[] headerbytes = Encoding.UTF8.GetBytes(header);
rs.Write(headerbytes, 0, headerbytes.Length);
var bytes = Encoding.UTF8.GetBytes(postData);
rs.Write(bytes, 0, bytes.Length);
byte[] trailer = Encoding.ASCII.GetBytes("\r\n--" + boundary + "--\r\n");
rs.Write(trailer, 0, trailer.Length);
rs.Close();
WebResponse wresp = null;
try
{
wresp = wr.GetResponse();
var stream2 = wresp.GetResponseStream();
if (stream2 != null)
{
var reader2 = new StreamReader(stream2);
var result = reader2.ReadToEnd();
return result;
}
}
catch (Exception e)
{
Log(errmessage = e.ToString());
if (wresp != null)
{
wresp.Close();
}
}
if (retry > 1)
{
Thread.Sleep(30000);
Log("Retrying post");
errmessage = String.Empty;
}
}
Log(errmessage = "Failed to post results: " + errmessage);
return errmessage;
}
public static string SendEmailNotification(string to, string subject, string message)
{
var postParams = new List<string>();
if (!string.IsNullOrEmpty(to))
postParams.Add("to=" + Uri.EscapeDataString(to));
if (!string.IsNullOrEmpty(subject))
postParams.Add("subject=" + Uri.EscapeDataString(subject));
if (!string.IsNullOrEmpty(message))
postParams.Add("message=" + Uri.EscapeDataString(message));
var postData = Encoding.ASCII.GetBytes(string.Join("&", postParams));
for (var retry = 5; retry > 0; retry--)
{
var request = (HttpWebRequest) WebRequest.Create(LABKEY_EMAIL_NOTIFICATION_URL);
request.ProtocolVersion = HttpVersion.Version11;
request.ContentType = "application/x-www-form-urlencoded";
request.Method = "POST";
request.KeepAlive = false;
request.Credentials = CredentialCache.DefaultCredentials;
request.Timeout = 30000; // 30 second timeout
SetCSRFToken(request, null);
using (var stream = request.GetRequestStream())
{
stream.Write(postData, 0, postData.Length);
}
try
{
using (var response = (HttpWebResponse)request.GetResponse())
using (var responseStream = response.GetResponseStream())
{
if (responseStream != null)
{
using (var responseReader = new StreamReader(responseStream))
{
return responseReader.ReadToEnd();
}
}
}
}
catch (Exception)
{
if (retry > 1)
Thread.Sleep(30000);
}
}
return null;
}
private static string GetNightlyDir()
{
var nightlyDir = Settings.Default.NightlyFolder;
return Path.IsPathRooted(nightlyDir)
? nightlyDir
// Kept for backward compatibility, but we don't allow this anymore, because the Documents
// folder is a terrible place to be running these high-use, nightly tests from.
: Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments), nightlyDir);
}
private static string GitCommand(string workingdir, string cmd)
{
var programFiles = Environment.GetFolderPath(Environment.SpecialFolder.ProgramFiles);
var exe = Path.Combine(programFiles, @"Git\cmd\git.exe");
Process git = new Process
{
StartInfo =
{
UseShellExecute = false,
RedirectStandardOutput = true,
RedirectStandardError = true,
FileName = exe,
WorkingDirectory = workingdir,
Arguments = cmd,
CreateNoWindow = true
}
};
git.Start();
var gitOutput = git.StandardOutput.ReadToEnd();
git.WaitForExit();
return gitOutput.Trim();
}
private string GetRevision(string pwizDir)
{
var revisionHash = GitCommand(pwizDir, @"rev-parse --short HEAD");
var revision = "unknownDate." + revisionHash;
return revision;
}
/// <summary>
/// Delete a file or directory, with quite a lot of retry on the expectation that
/// it's probably the TortoiseGit windows explorer icon plugin getting in your way
/// </summary>
private void Delete(string fileOrDir)
{
for (var i = 5; i >0; i--)
{
try
{
DeleteRecursive(fileOrDir);
break;
}
catch (Exception ex)
{
if (i == 1)
throw;
Log("Retrying failed delete of " + fileOrDir + ": " + ex.Message);
var random = new Random();
Thread.Sleep(1000 + random.Next(0, 5000)); // A little stutter-step to avoid unlucky sync with TortoiseGit icon update
}
}
}
private void DeleteRecursive(string fileOrDir)
{
if (File.Exists(fileOrDir))
{
File.SetAttributes(fileOrDir, FileAttributes.Normal); // Protect against failing on read-only files
File.Delete(fileOrDir);
}
else if (Directory.Exists(fileOrDir))
{
foreach (var entry in Directory.EnumerateFileSystemEntries(fileOrDir))
{
DeleteRecursive(entry);
}
Directory.Delete(fileOrDir, true);
}
}
private string Log(string message)
{
return Log(LogFileName, message);
}
public static string Log(string logFileName, string message)
{
var time = DateTime.Now;
var timestampedMessage = string.Format(
"[{0}:{1}:{2}] {3}",
time.Hour.ToString("D2"),
time.Minute.ToString("D2"),
time.Second.ToString("D2"),
message);
if (!string.IsNullOrEmpty(logFileName))
File.AppendAllText(logFileName, timestampedMessage + Environment.NewLine);
return timestampedMessage;
}
private void LogAndThrow(string message)
{
var timestampedMessage = Log(message);
SaveErrorScreenshot();
throw new Exception(timestampedMessage);
}
private void SaveErrorScreenshot()
{
// Capture the screen in hopes of finding exception dialogs etc
// From http://stackoverflow.com/questions/362986/capture-the-screen-into-a-bitmap
try
{
foreach (var screen in Screen.AllScreens) // Handle multi-monitor
{
// Create a new bitmap.
using (var bmpScreenshot = new Bitmap(screen.Bounds.Width, screen.Bounds.Height, PixelFormat.Format32bppArgb))
{
// Create a graphics object from the bitmap.
using (var gfxScreenshot = Graphics.FromImage(bmpScreenshot))
{
// Take the screenshot from the upper left corner to the right bottom corner.
gfxScreenshot.CopyFromScreen(screen.Bounds.X, screen.Bounds.Y,
0, 0, screen.Bounds.Size, CopyPixelOperation.SourceCopy);
// Save the screenshot
const string basename = "SkylineNightly_error_screenshot";
const string ext = ".png";
var fileScreenshot = Path.Combine(GetNightlyDir(), basename + ext);
for (var retry = 0; File.Exists(fileScreenshot); retry++)
fileScreenshot = Path.Combine(GetNightlyDir(), basename + "_" + retry + ext);
bmpScreenshot.Save(fileScreenshot, ImageFormat.Png);
Log("Diagnostic screenshot saved to \"" + fileScreenshot + "\"");
}
}
}
}
catch (Exception x)
{
Log("Could not create diagnostic screenshot: got exception \"" + x.Message + "\"");
}
}
private static void SetCSRFToken(HttpWebRequest postReq, string logFileName)
{
var url = LABKEY_HOME_URL;
var sessionCookies = new CookieContainer();
try
{
var request = (HttpWebRequest)WebRequest.Create(url);
request.Method = @"GET";
request.CookieContainer = sessionCookies;
using (var response = (HttpWebResponse)request.GetResponse())
{
postReq.CookieContainer = sessionCookies;
var csrf = response.Cookies[LABKEY_CSRF];
if (csrf != null)
{
// The server set a cookie called X-LABKEY-CSRF, get its value and add a header to the POST request
postReq.Headers.Add(LABKEY_CSRF, csrf.Value);
}
else
{
Log(logFileName, @"CSRF token not found.");
}
}
}
catch (Exception e)
{
Log(logFileName, $@"Error establishing a session and getting a CSRF token: {e}");
}
}
private static readonly ChannelFactory<IEndTimeSetter> END_TIME_SETTER_FACTORY =
new ChannelFactory<IEndTimeSetter>(new NetNamedPipeBinding(), new EndpointAddress("net.pipe://localhost/Nightly/SetEndTime"));
// Set the end time of an already running nightly run (e.g. if there is a hang and we want to give more time for someone to attach a debugger)
public bool SetEndTime(DateTime endTime)
{
Exception exception = null;
try
{
END_TIME_SETTER_FACTORY.CreateChannel().SetEndTime(endTime);
}
catch (Exception x)
{
exception = x;
}
Log(string.Format("Setting nightly end time to {0} {1}: {2}",
endTime.ToShortDateString(), endTime.ToShortTimeString(), exception == null ? "OK" : exception.Message));
return exception == null;
}
// Allows SkylineNightly to change the stop time of a nightly run via IPC
[ServiceContract]
public interface IEndTimeSetter
{
[OperationContract]
void SetEndTime(DateTime endTime);
}
}
// ReSharper restore LocalizableElement
}
| 1 | 14,343 | It would probably be better to do: const SecurityProtocolType Tls13 = (SecurityProtocolType)12288 | ProteoWizard-pwiz | .cs |
@@ -70,16 +70,6 @@ export default [
updated_by: 1,
value: ''
},
- {
- id: 12,
- key: 'labs',
- value: '{"subscribers":true}',
- type: 'blog',
- created_at: '2015-01-12T18:29:01.000Z',
- created_by: 1,
- updated_at: '2015-10-27T17:39:58.288Z',
- updated_by: 1
- },
{
id: 13,
created_at: '2015-09-11T09:44:30.810Z', | 1 | /* eslint-disable camelcase */
export default [
{
id: 1,
created_at: '2015-09-11T09:44:30.805Z',
created_by: 1,
key: 'title',
type: 'blog',
updated_at: '2015-10-04T16:26:05.195Z',
updated_by: 1,
value: 'Test Blog'
},
{
id: 2,
created_at: '2015-09-11T09:44:30.806Z',
created_by: 1,
key: 'description',
type: 'blog',
updated_at: '2015-10-04T16:26:05.198Z',
updated_by: 1,
value: 'Thoughts, stories and ideas.'
},
{
id: 3,
key: 'logo',
value: '/content/images/2013/Nov/logo.png',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.273Z',
updated_by: 1
},
{
id: 4,
key: 'cover_image',
value: '/content/images/2014/Feb/cover.jpg',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.276Z',
updated_by: 1
},
{
id: 5,
key: 'default_lang',
value: 'en_US',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.278Z',
updated_by: 1
},
{
id: 10,
created_at: '2015-09-11T09:44:30.809Z',
created_by: 1,
key: 'codeinjection_head',
type: 'blog',
updated_at: '2015-09-23T13:32:49.858Z',
updated_by: 1,
value: ''
},
{
id: 11,
created_at: '2015-09-11T09:44:30.809Z',
created_by: 1,
key: 'codeinjection_foot',
type: 'blog',
updated_at: '2015-09-23T13:32:49.858Z',
updated_by: 1,
value: ''
},
{
id: 12,
key: 'labs',
value: '{"subscribers":true}',
type: 'blog',
created_at: '2015-01-12T18:29:01.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.288Z',
updated_by: 1
},
{
id: 13,
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
key: 'navigation',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
value: JSON.stringify([
{label: 'Home', url: '/'},
{label: 'About', url: '/about'}
])
},
{
id: 14,
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
key: 'is_private',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
value: false
},
{
id: 15,
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
key: 'password',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
value: ''
},
{
id: 16,
created_at: '2016-05-05T15:04:03.115Z',
created_by: 1,
key: 'slack',
type: 'blog',
updated_at: '2016-05-05T18:33:09.168Z',
updated_by: 1,
value: '[{"url":""}]'
},
{
id: 17,
created_at: '2016-05-05T15:40:12.133Z',
created_by: 1,
key: 'facebook',
type: 'blog',
updated_at: '2016-05-08T15:20:25.953Z',
updated_by: 1,
value: 'test'
},
{
id: 18,
created_at: '2016-05-05T15:40:12.134Z',
created_by: 1,
key: 'twitter',
type: 'blog',
updated_at: '2016-05-08T15:20:25.954Z',
updated_by: 1,
value: '@test'
},
{
id: 19,
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
key: 'active_timezone',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
value: 'Etc/UTC'
},
{
id: 21,
created_at: '2017-01-09T08:40:59.000Z',
created_by: 1,
key: 'amp',
type: 'blog',
updated_at: '2017-01-09T08:49:42.991Z',
updated_by: 1,
value: 'true'
},
{
id: 22,
key: 'icon',
value: '/content/images/2014/Feb/favicon.ico',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.276Z',
updated_by: 1
}
];
| 1 | 9,309 | Similar to the above, put this setting back but keep the value as `'{}'` | TryGhost-Admin | js |
@@ -130,7 +130,7 @@ def refresh_listen_count_aggregate():
Assuming today is 2022-01-01 and this function is called for year_offset 1 and
year_count 1 then all of 2021 will be refreshed.
"""
-
+ logger.info("Starting to refresh continuous aggregates:")
timescale.init_db_connection(config.SQLALCHEMY_TIMESCALE_URI)
end_ts = int(datetime.now().timestamp()) - SECONDS_IN_A_YEAR | 1 | import time
from collections import defaultdict
from datetime import datetime, timedelta
import psycopg2
from psycopg2.errors import UntranslatableCharacter
import sqlalchemy
import logging
from brainzutils import cache
from listenbrainz.utils import init_cache
from listenbrainz import db
from listenbrainz.db import timescale
from listenbrainz.listenstore.timescale_listenstore import REDIS_USER_LISTEN_COUNT, REDIS_USER_TIMESTAMPS, DATA_START_YEAR_IN_SECONDS
from listenbrainz import config
logger = logging.getLogger(__name__)
NUM_YEARS_TO_PROCESS_FOR_CONTINUOUS_AGGREGATE_REFRESH = 3
SECONDS_IN_A_YEAR = 31536000
def recalculate_all_user_data():
timescale.init_db_connection(config.SQLALCHEMY_TIMESCALE_URI)
db.init_db_connection(config.SQLALCHEMY_DATABASE_URI)
init_cache(host=config.REDIS_HOST, port=config.REDIS_PORT,
namespace=config.REDIS_NAMESPACE)
# Find the created timestamp of the last listen
query = "SELECT max(created) FROM listen WHERE created > :date"
try:
with timescale.engine.connect() as connection:
result = connection.execute(sqlalchemy.text(
query), date=datetime.now() - timedelta(weeks=4))
row = result.fetchone()
last_created_ts = row[0]
except psycopg2.OperationalError as e:
logger.error("Cannot query ts to fetch latest listen." %
str(e), exc_info=True)
raise
logger.info("Last created timestamp: " + str(last_created_ts))
# Select a list of users
user_list = []
query = 'SELECT musicbrainz_id FROM "user"'
try:
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text(query))
for row in result:
user_list.append(row[0])
except psycopg2.OperationalError as e:
logger.error("Cannot query db to fetch user list." %
str(e), exc_info=True)
raise
logger.info("Fetched %d users. Setting empty cache entries." %
len(user_list))
# Reset the timestamps and listen counts to 0 for all users
for user_name in user_list:
cache.set(REDIS_USER_LISTEN_COUNT + user_name, 0, time=0, encode=False)
cache.set(REDIS_USER_LISTEN_COUNT + user_name, 0, time=0, encode=False)
cache.set(REDIS_USER_TIMESTAMPS + user_name, "0,0", time=0)
# Tabulate all of the listen counts/timestamps for all users
logger.info("Scan the whole listen table...")
listen_counts = defaultdict(int)
user_timestamps = {}
query = "SELECT listened_at, user_name FROM listen where created <= :ts"
try:
with timescale.engine.connect() as connection:
result = connection.execute(
sqlalchemy.text(query), ts=last_created_ts)
for row in result:
ts = row[0]
user_name = row[1]
if user_name not in user_timestamps:
user_timestamps[user_name] = [ts, ts]
else:
if ts > user_timestamps[user_name][1]:
user_timestamps[user_name][1] = ts
if ts < user_timestamps[user_name][0]:
user_timestamps[user_name][0] = ts
listen_counts[user_name] += 1
except psycopg2.OperationalError as e:
logger.error("Cannot query db to fetch user list." %
str(e), exc_info=True)
raise
logger.info("Setting updated cache entries.")
# Set the timestamps and listen counts for all users
for user_name in user_list:
try:
cache._r.incrby(cache._prep_key(
REDIS_USER_LISTEN_COUNT + user_name), listen_counts[user_name])
except KeyError:
pass
try:
tss = cache.get(REDIS_USER_TIMESTAMPS + user_name)
(min_ts, max_ts) = tss.split(",")
min_ts = int(min_ts)
max_ts = int(max_ts)
if min_ts and min_ts < user_timestamps[user_name][0]:
user_timestamps[user_name][0] = min_ts
if max_ts and max_ts > user_timestamps[user_name][1]:
user_timestamps[user_name][1] = max_ts
cache.set(REDIS_USER_TIMESTAMPS + user_name, "%d,%d" %
(user_timestamps[user_name][0], user_timestamps[user_name][1]), time=0)
except KeyError:
pass
def refresh_listen_count_aggregate():
"""
Manually refresh the listen_count continuous aggregate.
Arg:
year_offset: How many years into the past should we start refreshing (e.g 1 year,
will refresh everything that is 1 year or older.
year_count: How many years from year_offset should we update.
Example:
Assuming today is 2022-01-01 and this function is called for year_offset 1 and
year_count 1 then all of 2021 will be refreshed.
"""
timescale.init_db_connection(config.SQLALCHEMY_TIMESCALE_URI)
end_ts = int(datetime.now().timestamp()) - SECONDS_IN_A_YEAR
start_ts = end_ts - \
(NUM_YEARS_TO_PROCESS_FOR_CONTINUOUS_AGGREGATE_REFRESH * SECONDS_IN_A_YEAR) + 1
while True:
query = "call refresh_continuous_aggregate('listen_count_30day', :start_ts, :end_ts)"
t0 = time.monotonic()
try:
with timescale.engine.connect() as connection:
connection.connection.set_isolation_level(0)
connection.execute(sqlalchemy.text(query), {
"start_ts": start_ts,
"end_ts": end_ts
})
except psycopg2.OperationalError as e:
            logger.error("Cannot refresh listen_count_30day cont agg: %s" %
                         str(e), exc_info=True)
raise
t1 = time.monotonic()
logger.info("Refreshed continuous aggregate for: %s to %s in %.2fs" % (str(
datetime.fromtimestamp(start_ts)), str(datetime.fromtimestamp(end_ts)), t1-t0))
end_ts -= (NUM_YEARS_TO_PROCESS_FOR_CONTINUOUS_AGGREGATE_REFRESH * SECONDS_IN_A_YEAR)
start_ts -= (NUM_YEARS_TO_PROCESS_FOR_CONTINUOUS_AGGREGATE_REFRESH * SECONDS_IN_A_YEAR)
if end_ts < DATA_START_YEAR_IN_SECONDS:
break
class TimescaleListenStoreException(Exception):
pass
| 1 | 18,883 | As discussed in chat - we should run this function within an app context which means that we'd already have a logger configured, and a connection to timescale set up | metabrainz-listenbrainz-server | py |
@@ -33,12 +33,4 @@
<div id="about" class="card">
<h2 class='card-header collapsed collapse-toggle' data-toggle="collapse" data-target="#about-content"><a href="/rails/info/properties">About your application’s environment</a></h2>
<div id="about-content" class="card-body collapse"></div>
-</div>
-
-<script>
- Blacklight.onLoad(function() {
- $('#about .card-header').one('click', function() {
- $($(this).data('target')).load($(this).find('a').attr('href'));
- });
- });
-</script>
+</div> | 1 | <div class="jumbotron text-center">
<h1 class="jumbotron-heading"><%= t('blacklight.welcome') %></h1>
<p class="lead">Blacklight is a multi-institutional open-source collaboration building a better discovery platform framework.</p>
<p>
<%= link_to 'Read the Documentation', 'https://github.com/projectblacklight/blacklight/wiki', class: 'btn btn-primary' %>
<%= link_to 'See Examples', 'http://projectblacklight.org', class: 'btn btn-outline-secondary' %>
</p>
</div>
<div id="getting-started">
<h2>Here’s how to get started customizing your application:</h2>
<p>There are many ways to override specific behaviors and views in Blacklight. Because Blacklight is distributed as a Rails engine-based gem, all customization of Blacklight behavior should be done within your application by overriding Blacklight-provided behaviors with your own.</p>
<p>
<ol>
<li>To modify this text, you need to <a href="http://guides.rubyonrails.org/engines.html#improving-engine-functionality">override the Blacklight-provided view</a>.
You can copy this file, located in the blacklight gem: <br />
<%= Blacklight.root %>/app/views/catalog/_home_text.html.erb <br />
to your own application: <br />
<%= Rails.root %>/app/views/catalog/_home_text.html.erb
</li>
<li><a href="https://github.com/projectblacklight/blacklight/wiki/Indexing-your-data-into-solr">Index your own data</a> into Solr</li>
<li><a href="https://github.com/projectblacklight/blacklight/wiki#blacklight-configuration">Configure Blacklight</a> to match your data and user-experience needs</li>
<li><a href="https://github.com/projectblacklight/blacklight/wiki#support">Get in touch</a> with your comments, questions, and ideas</li>
</ol>
</p>
</div>
<%# This is the same panel shown in the Rails welcome template %>
<div id="about" class="card">
<h2 class='card-header collapsed collapse-toggle' data-toggle="collapse" data-target="#about-content"><a href="/rails/info/properties">About your application’s environment</a></h2>
<div id="about-content" class="card-body collapse"></div>
</div>
<script>
Blacklight.onLoad(function() {
$('#about .card-header').one('click', function() {
$($(this).data('target')).load($(this).find('a').attr('href'));
});
});
</script>
| 1 | 8,098 | I'm curious about why this needed to be removed. | projectblacklight-blacklight | rb |
@@ -17,10 +17,10 @@ from scapy.config import conf
from scapy.base_classes import BasePacket,BasePacketList
from scapy.utils import do_graph,hexdump,make_table,make_lined_table,make_tex_table,get_temp_file
-from scapy.consts import plt, MATPLOTLIB_INLINED, MATPLOTLIB_DEFAULT_PLOT_KARGS
+from scapy.extlib import plt, MATPLOTLIB_INLINED, MATPLOTLIB_DEFAULT_PLOT_KARGS
from functools import reduce
import scapy.modules.six as six
-from scapy.modules.six.moves import filter, range, zip
+from scapy.modules.six.moves import range, zip
############# | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
PacketList: holds several packets and allows operations to be performed on them.
"""
from __future__ import absolute_import
from __future__ import print_function
import os,subprocess
from collections import defaultdict
from scapy.config import conf
from scapy.base_classes import BasePacket,BasePacketList
from scapy.utils import do_graph,hexdump,make_table,make_lined_table,make_tex_table,get_temp_file
from scapy.consts import plt, MATPLOTLIB_INLINED, MATPLOTLIB_DEFAULT_PLOT_KARGS
from functools import reduce
import scapy.modules.six as six
from scapy.modules.six.moves import filter, range, zip
#############
## Results ##
#############
class PacketList(BasePacketList):
__slots__ = ["stats", "res", "listname"]
def __init__(self, res=None, name="PacketList", stats=None):
"""create a packet list from a list of packets
res: the list of packets
stats: a list of classes that will appear in the stats (defaults to [TCP,UDP,ICMP])"""
if stats is None:
stats = conf.stats_classic_protocols
self.stats = stats
if res is None:
res = []
elif isinstance(res, PacketList):
res = res.res
self.res = res
self.listname = name
def __len__(self):
return len(self.res)
def _elt2pkt(self, elt):
return elt
def _elt2sum(self, elt):
return elt.summary()
def _elt2show(self, elt):
return self._elt2sum(elt)
def __repr__(self):
stats = {x: 0 for x in self.stats}
other = 0
for r in self.res:
f = 0
for p in stats:
if self._elt2pkt(r).haslayer(p):
stats[p] += 1
f = 1
break
if not f:
other += 1
s = ""
ct = conf.color_theme
for p in self.stats:
s += " %s%s%s" % (ct.packetlist_proto(p._name),
ct.punct(":"),
ct.packetlist_value(stats[p]))
s += " %s%s%s" % (ct.packetlist_proto("Other"),
ct.punct(":"),
ct.packetlist_value(other))
return "%s%s%s%s%s" % (ct.punct("<"),
ct.packetlist_name(self.listname),
ct.punct(":"),
s,
ct.punct(">"))
def __getattr__(self, attr):
return getattr(self.res, attr)
def __getitem__(self, item):
if isinstance(item,type) and issubclass(item,BasePacket):
return self.__class__([x for x in self.res if item in self._elt2pkt(x)],
name="%s from %s"%(item.__name__,self.listname))
if isinstance(item, slice):
return self.__class__(self.res.__getitem__(item),
name = "mod %s" % self.listname)
return self.res.__getitem__(item)
def __getslice__(self, *args, **kargs):
return self.__class__(self.res.__getslice__(*args, **kargs),
name="mod %s"%self.listname)
def __add__(self, other):
return self.__class__(self.res+other.res,
name="%s+%s"%(self.listname,other.listname))
def summary(self, prn=None, lfilter=None):
"""prints a summary of each packet
prn: function to apply to each packet instead of lambda x:x.summary()
lfilter: truth function to apply to each packet to decide whether it will be displayed"""
for r in self.res:
if lfilter is not None:
if not lfilter(r):
continue
if prn is None:
print(self._elt2sum(r))
else:
print(prn(r))
def nsummary(self, prn=None, lfilter=None):
"""prints a summary of each packet with the packet's number
prn: function to apply to each packet instead of lambda x:x.summary()
lfilter: truth function to apply to each packet to decide whether it will be displayed"""
for i, res in enumerate(self.res):
if lfilter is not None:
if not lfilter(res):
continue
print(conf.color_theme.id(i,fmt="%04i"), end=' ')
if prn is None:
print(self._elt2sum(res))
else:
print(prn(res))
def display(self): # Deprecated. Use show()
"""deprecated. is show()"""
self.show()
def show(self, *args, **kargs):
"""Best way to display the packet list. Defaults to nsummary() method"""
return self.nsummary(*args, **kargs)
def filter(self, func):
"""Returns a packet list filtered by a truth function"""
return self.__class__([x for x in self.res if func(x)],
name="filtered %s"%self.listname)
def make_table(self, *args, **kargs):
"""Prints a table using a function that returns for each packet its head column value, head row value and displayed value
        ex: p.make_table(lambda x:(x[IP].dst, x[TCP].dport, x[TCP].sprintf("%flags%"))) """
return make_table(self.res, *args, **kargs)
def make_lined_table(self, *args, **kargs):
"""Same as make_table, but print a table with lines"""
return make_lined_table(self.res, *args, **kargs)
def make_tex_table(self, *args, **kargs):
"""Same as make_table, but print a table with LaTeX syntax"""
return make_tex_table(self.res, *args, **kargs)
def plot(self, f, lfilter=None, plot_xy=False, **kargs):
"""Applies a function to each packet to get a value that will be plotted
with matplotlib. A list of matplotlib.lines.Line2D is returned.
lfilter: a truth function that decides whether a packet must be plotted
"""
# Get the list of packets
if lfilter is None:
l = [f(e) for e in self.res]
else:
l = [f(e) for e in self.res if lfilter(e)]
# Mimic the default gnuplot output
if kargs == {}:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
if plot_xy:
lines = plt.plot(*zip(*l), **kargs)
else:
lines = plt.plot(l, **kargs)
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
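    # Illustrative usage sketch (assumes an already loaded PacketList such as pkts = rdpcap("capture.pcap")):
    #   pkts.plot(lambda pkt: len(pkt))                             # packet sizes in capture order
    #   pkts.plot(lambda pkt: (pkt.time, len(pkt)), plot_xy=True)   # packet sizes over time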
def diffplot(self, f, delay=1, lfilter=None, **kargs):
"""diffplot(f, delay=1, lfilter=None)
Applies a function to couples (l[i],l[i+delay])
A list of matplotlib.lines.Line2D is returned.
"""
# Get the list of packets
if lfilter is None:
l = [f(self.res[i], self.res[i+1])
for i in range(len(self.res) - delay)]
else:
l = [f(self.res[i], self.res[i+1])
for i in range(len(self.res) - delay)
if lfilter(self.res[i])]
# Mimic the default gnuplot output
if kargs == {}:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
lines = plt.plot(l, **kargs)
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
def multiplot(self, f, lfilter=None, plot_xy=False, **kargs):
"""Uses a function that returns a label and a value for this label, then
plots all the values label by label.
A list of matplotlib.lines.Line2D is returned.
"""
# Get the list of packets
if lfilter is None:
l = (f(e) for e in self.res)
else:
l = (f(e) for e in self.res if lfilter(e))
# Apply the function f to the packets
d = {}
for k, v in l:
d.setdefault(k, []).append(v)
# Mimic the default gnuplot output
if not kargs:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
if plot_xy:
lines = [plt.plot(*zip(*pl), **dict(kargs, label=k))
for k, pl in six.iteritems(d)]
else:
lines = [plt.plot(pl, **dict(kargs, label=k))
for k, pl in six.iteritems(d)]
plt.legend(loc="center right", bbox_to_anchor=(1.5, 0.5))
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
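    # Illustrative usage sketch (hypothetical field choices): one labelled line per destination IP,
    # plotting packet length against capture time for each destination:
    #   pkts.multiplot(lambda pkt: (pkt['IP'].dst, (pkt.time, len(pkt))),
    #                  lfilter=lambda pkt: 'IP' in pkt, plot_xy=True)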
def rawhexdump(self):
"""Prints an hexadecimal dump of each packet in the list"""
for p in self:
hexdump(self._elt2pkt(p))
def hexraw(self, lfilter=None):
"""Same as nsummary(), except that if a packet has a Raw layer, it will be hexdumped
lfilter: a truth function that decides whether a packet must be displayed"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if lfilter is not None and not lfilter(p):
continue
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
if p.haslayer(conf.raw_layer):
hexdump(p.getlayer(conf.raw_layer).load)
def hexdump(self, lfilter=None):
"""Same as nsummary(), except that packets are also hexdumped
lfilter: a truth function that decides whether a packet must be displayed"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if lfilter is not None and not lfilter(p):
continue
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(p)
def padding(self, lfilter=None):
"""Same as hexraw(), for Padding layer"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if p.haslayer(conf.padding_layer):
if lfilter is None or lfilter(p):
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(p.getlayer(conf.padding_layer).load)
def nzpadding(self, lfilter=None):
"""Same as padding() but only non null padding"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if p.haslayer(conf.padding_layer):
pad = p.getlayer(conf.padding_layer).load
if pad == pad[0]*len(pad):
continue
if lfilter is None or lfilter(p):
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(p.getlayer(conf.padding_layer).load)
def conversations(self, getsrcdst=None,**kargs):
"""Graphes a conversations between sources and destinations and display it
(using graphviz and imagemagick)
getsrcdst: a function that takes an element of the list and
returns the source, the destination and optionally
a label. By default, returns the IP source and
destination from IP and ARP layers
type: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: filename or redirect. Defaults pipe to Imagemagick's display program
prog: which graphviz program to use"""
if getsrcdst is None:
def getsrcdst(pkt):
if 'IP' in pkt:
return (pkt['IP'].src, pkt['IP'].dst)
if 'ARP' in pkt:
return (pkt['ARP'].psrc, pkt['ARP'].pdst)
raise TypeError()
conv = {}
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except:
# No warning here: it's OK that getsrcdst() raises an
# exception, since it might be, for example, a
# function that expects a specific layer in each
# packet. The try/except approach is faster and
# considered more Pythonic than adding tests.
continue
if len(c) == 3:
conv.setdefault(c[:2], set()).add(c[2])
else:
conv[c] = conv.get(c, 0) + 1
gr = 'digraph "conv" {\n'
for (s, d), l in six.iteritems(conv):
gr += '\t "%s" -> "%s" [label="%s"]\n' % (
s, d, ', '.join(str(x) for x in l) if isinstance(l, set) else l
)
gr += "}\n"
return do_graph(gr, **kargs)
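    # Illustrative usage sketch: the default getsrcdst() handles IP and ARP, so for example
    #   pkts.conversations(type="png", target="> conversations.png")
    # writes the source -> destination graph to a file; both kwargs are passed on to do_graph().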
def afterglow(self, src=None, event=None, dst=None, **kargs):
"""Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst"""
if src is None:
src = lambda x: x['IP'].src
if event is None:
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s,e,d = src(i),event(i),dst(i)
if s in sl:
n,l = sl[s]
n += 1
if e not in l:
l.append(e)
sl[s] = (n,l)
else:
sl[s] = (1,[e])
if e in el:
n,l = el[e]
n+=1
if d not in l:
l.append(d)
el[e] = (n,l)
else:
el[e] = (1,[d])
dl[d] = dl.get(d,0)+1
except:
continue
import math
def normalize(n):
return 2+math.log(n)/4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
gr += "# src nodes\n"
for s in sl:
n,l = sl[s]; n = 1+float(n-mins)/(maxs-mins)
gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s),repr(s),n,n)
gr += "# event nodes\n"
for e in el:
            n,l = el[e]; n = 1+float(n-mine)/(maxe-mine)
gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e),repr(e),n,n)
for d in dl:
            n = dl[d]; n = 1+float(n-mind)/(maxd-mind)
gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d),repr(d),n,n)
gr += "###\n"
for s in sl:
n,l = sl[s]
for e in l:
gr += ' "src.%s" -> "evt.%s";\n' % (repr(s),repr(e))
for e in el:
n,l = el[e]
for d in l:
gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e),repr(d))
gr += "}"
return do_graph(gr, **kargs)
def _dump_document(self, **kargs):
import pyx
d = pyx.document.document()
l = len(self.res)
for i, res in enumerate(self.res):
c = self._elt2pkt(res).canvas_dump(**kargs)
cbb = c.bbox()
c.text(cbb.left(),cbb.top()+1,r"\font\cmssfont=cmss12\cmssfont{Frame %i/%i}" % (i,l),[pyx.text.size.LARGE])
if conf.verb >= 2:
os.write(1, b".")
d.append(pyx.document.page(c, paperformat=pyx.document.paperformat.A4,
margin=1*pyx.unit.t_cm,
fittosize=1))
return d
def psdump(self, filename = None, **kargs):
"""Creates a multi-page postcript file with a psdump of every packet
filename: name of the file to write to. If empty, a temporary file is used and
conf.prog.psreader is called"""
d = self._dump_document(**kargs)
if filename is None:
filename = get_temp_file(autoext=".ps")
d.writePSfile(filename)
with ContextManagerSubprocess("psdump()"):
subprocess.Popen([conf.prog.psreader, filename+".ps"])
else:
d.writePSfile(filename)
print()
def pdfdump(self, filename = None, **kargs):
"""Creates a PDF file with a psdump of every packet
filename: name of the file to write to. If empty, a temporary file is used and
conf.prog.pdfreader is called"""
d = self._dump_document(**kargs)
if filename is None:
filename = get_temp_file(autoext=".pdf")
d.writePDFfile(filename)
with ContextManagerSubprocess("psdump()"):
subprocess.Popen([conf.prog.pdfreader, filename+".pdf"])
else:
d.writePDFfile(filename)
print()
def sr(self,multi=0):
"""sr([multi=1]) -> (SndRcvList, PacketList)
Matches packets in the list and return ( (matched couples), (unmatched packets) )"""
remain = self.res[:]
sr = []
i = 0
while i < len(remain):
s = remain[i]
j = i
while j < len(remain)-1:
j += 1
r = remain[j]
if r.answers(s):
sr.append((s,r))
if multi:
remain[i]._answered=1
remain[j]._answered=2
continue
del(remain[j])
del(remain[i])
i -= 1
break
i += 1
if multi:
remain = [x for x in remain if not hasattr(x, "_answered")]
return SndRcvList(sr),PacketList(remain)
def sessions(self, session_extractor=None):
if session_extractor is None:
def session_extractor(p):
sess = "Other"
if 'Ether' in p:
if 'IP' in p:
if 'TCP' in p:
sess = p.sprintf("TCP %IP.src%:%r,TCP.sport% > %IP.dst%:%r,TCP.dport%")
elif 'UDP' in p:
sess = p.sprintf("UDP %IP.src%:%r,UDP.sport% > %IP.dst%:%r,UDP.dport%")
elif 'ICMP' in p:
sess = p.sprintf("ICMP %IP.src% > %IP.dst% type=%r,ICMP.type% code=%r,ICMP.code% id=%ICMP.id%")
else:
sess = p.sprintf("IP %IP.src% > %IP.dst% proto=%IP.proto%")
elif 'ARP' in p:
sess = p.sprintf("ARP %ARP.psrc% > %ARP.pdst%")
else:
sess = p.sprintf("Ethernet type=%04xr,Ether.type%")
return sess
sessions = defaultdict(self.__class__)
for p in self.res:
sess = session_extractor(self._elt2pkt(p))
sessions[sess].append(p)
return dict(sessions)
def replace(self, *args, **kargs):
"""
lst.replace(<field>,[<oldvalue>,]<newvalue>)
lst.replace( (fld,[ov],nv),(fld,[ov,]nv),...)
if ov is None, all values are replaced
ex:
lst.replace( IP.src, "192.168.1.1", "10.0.0.1" )
lst.replace( IP.ttl, 64 )
lst.replace( (IP.ttl, 64), (TCP.sport, 666, 777), )
"""
delete_checksums = kargs.get("delete_checksums",False)
x=PacketList(name="Replaced %s" % self.listname)
if not isinstance(args[0], tuple):
args = (args,)
for p in self.res:
p = self._elt2pkt(p)
copied = False
for scheme in args:
fld = scheme[0]
old = scheme[1] # not used if len(scheme) == 2
new = scheme[-1]
for o in fld.owners:
if o in p:
if len(scheme) == 2 or p[o].getfieldval(fld.name) == old:
if not copied:
p = p.copy()
if delete_checksums:
p.delete_checksums()
copied = True
setattr(p[o], fld.name, new)
x.append(p)
return x
class SndRcvList(PacketList):
__slots__ = []
def __init__(self, res=None, name="Results", stats=None):
PacketList.__init__(self, res, name, stats)
def _elt2pkt(self, elt):
return elt[1]
def _elt2sum(self, elt):
return "%s ==> %s" % (elt[0].summary(),elt[1].summary())
| 1 | 12,032 | why did you remove `zip`? It is used! | secdev-scapy | py |
@@ -90,7 +90,7 @@ func (bs *BrokerStatus) MarkTopicUnknown(reason, format string, args ...interfac
brokerCondSet.Manage(bs).MarkUnknown(BrokerConditionTopic, reason, format, args...)
}
-func (bs *BrokerStatus) MarkTopicReady() {
+func (bs *BrokerStatus) MarkTopicReady(_ string) {
brokerCondSet.Manage(bs).MarkTrue(BrokerConditionTopic)
}
| 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
"knative.dev/pkg/apis"
)
var brokerCondSet = apis.NewLivingConditionSet(
eventingv1beta1.BrokerConditionAddressable,
BrokerConditionBrokerCell,
BrokerConditionTopic,
BrokerConditionSubscription,
)
const (
// BrokerConditionBrokerCell reports the availability of the Broker's BrokerCell.
BrokerConditionBrokerCell apis.ConditionType = "BrokerCellReady"
// BrokerConditionTopic reports the status of the Broker's PubSub topic.
	// This condition is specific to the Google Cloud Broker.
BrokerConditionTopic apis.ConditionType = "TopicReady"
// BrokerConditionSubscription reports the status of the Broker's PubSub
// subscription. This condition is specific to the Google Cloud Broker.
BrokerConditionSubscription apis.ConditionType = "SubscriptionReady"
)
// GetCondition returns the condition currently associated with the given type, or nil.
func (bs *BrokerStatus) GetCondition(t apis.ConditionType) *apis.Condition {
return brokerCondSet.Manage(bs).GetCondition(t)
}
// GetTopLevelCondition returns the top level Condition.
func (bs *BrokerStatus) GetTopLevelCondition() *apis.Condition {
return brokerCondSet.Manage(bs).GetTopLevelCondition()
}
// IsReady returns true if the resource is ready overall.
func (bs *BrokerStatus) IsReady() bool {
return brokerCondSet.Manage(bs).IsHappy()
}
// InitializeConditions sets relevant unset conditions to Unknown state.
func (bs *BrokerStatus) InitializeConditions() {
brokerCondSet.Manage(bs).InitializeConditions()
}
// SetAddress makes this Broker addressable by setting the hostname. It also
// sets the BrokerConditionAddressable to true.
func (bs *BrokerStatus) SetAddress(url *apis.URL) {
bs.Address.URL = url
if url != nil {
brokerCondSet.Manage(bs).MarkTrue(eventingv1beta1.BrokerConditionAddressable)
} else {
brokerCondSet.Manage(bs).MarkFalse(eventingv1beta1.BrokerConditionAddressable, "emptyURL", "URL is empty")
}
}
func (bs *BrokerStatus) MarkBrokerCellUnknown(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkUnknown(BrokerConditionBrokerCell, reason, format, args...)
}
func (bs *BrokerStatus) MarkBrokerCellFailed(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkFalse(BrokerConditionBrokerCell, reason, format, args...)
}
func (bs *BrokerStatus) MarkBrokerCellReady() {
brokerCondSet.Manage(bs).MarkTrue(BrokerConditionBrokerCell)
}
func (bs *BrokerStatus) MarkTopicFailed(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkFalse(BrokerConditionTopic, reason, format, args...)
}
func (bs *BrokerStatus) MarkTopicUnknown(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkUnknown(BrokerConditionTopic, reason, format, args...)
}
func (bs *BrokerStatus) MarkTopicReady() {
brokerCondSet.Manage(bs).MarkTrue(BrokerConditionTopic)
}
func (bs *BrokerStatus) MarkSubscriptionFailed(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkFalse(BrokerConditionSubscription, reason, format, args...)
}
func (bs *BrokerStatus) MarkSubscriptionUnknown(reason, format string, args ...interface{}) {
brokerCondSet.Manage(bs).MarkUnknown(BrokerConditionSubscription, reason, format, args...)
}
func (bs *BrokerStatus) MarkSubscriptionReady() {
brokerCondSet.Manage(bs).MarkTrue(BrokerConditionSubscription)
}
| 1 | 19,562 | Is there any reason that we don't add a similar condition like `ChannelConditionTopic` to the channel? | google-knative-gcp | go |
@@ -1,4 +1,4 @@
-<?php namespace TestVendor\Goto;
+<?php namespace TestVendor\_Goto;
use System\Classes\PluginBase;
| 1 | <?php namespace TestVendor\Goto;
use System\Classes\PluginBase;
class Plugin extends PluginBase
{
public function pluginDetails()
{
return [
'name' => 'Invalid Test Plugin',
'description' => 'Test plugin used by unit tests to detect plugins with invalid namespaces.',
'author' => 'Test Vendor'
];
}
}
| 1 | 19,341 | Before PHP 8, "goto" was a reserved word and was not allowed as part of a namespace. Now the test checks the validity of the plugin namespace according to PSR-4 | octobercms-october | php
@@ -85,6 +85,7 @@ func (t *Transport) NewInbound(listener net.Listener, options ...InboundOption)
}
 // NewSingleOutbound returns a new Outbound for the given address.
+// Note: This does not support TLS. See TLS example in doc.go.
func (t *Transport) NewSingleOutbound(address string, options ...OutboundOption) *Outbound {
return newSingleOutbound(t, address, options...)
} | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package grpc
import (
"net"
"sync"
"go.uber.org/multierr"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/pkg/lifecycle"
)
// Transport is a grpc transport.Transport.
//
// This currently does not have any additional functionality over creating
// an Inbound or Outbound separately, but may in the future.
type Transport struct {
lock sync.Mutex
once *lifecycle.Once
options *transportOptions
addressToPeer map[string]*grpcPeer
}
// NewTransport returns a new Transport.
func NewTransport(options ...TransportOption) *Transport {
return newTransport(newTransportOptions(options))
}
func newTransport(transportOptions *transportOptions) *Transport {
return &Transport{
once: lifecycle.NewOnce(),
options: transportOptions,
addressToPeer: make(map[string]*grpcPeer),
}
}
// Start implements transport.Lifecycle#Start.
func (t *Transport) Start() error {
return t.once.Start(nil)
}
// Stop implements transport.Lifecycle#Stop.
func (t *Transport) Stop() error {
return t.once.Stop(func() error {
t.lock.Lock()
defer t.lock.Unlock()
for _, grpcPeer := range t.addressToPeer {
grpcPeer.stop()
}
var err error
for _, grpcPeer := range t.addressToPeer {
err = multierr.Append(err, grpcPeer.wait())
}
return err
})
}
// IsRunning implements transport.Lifecycle#IsRunning.
func (t *Transport) IsRunning() bool {
return t.once.IsRunning()
}
// NewInbound returns a new Inbound for the given listener.
func (t *Transport) NewInbound(listener net.Listener, options ...InboundOption) *Inbound {
return newInbound(t, listener, options...)
}
// NewSingleOutbound returns a new Outbound for the given address.
func (t *Transport) NewSingleOutbound(address string, options ...OutboundOption) *Outbound {
return newSingleOutbound(t, address, options...)
}
// NewOutbound returns a new Outbound for the given peer.Chooser.
func (t *Transport) NewOutbound(peerChooser peer.Chooser, options ...OutboundOption) *Outbound {
return newOutbound(t, peerChooser, options...)
}
// RetainPeer retains the peer.
func (t *Transport) RetainPeer(peerIdentifier peer.Identifier, peerSubscriber peer.Subscriber) (peer.Peer, error) {
t.lock.Lock()
defer t.lock.Unlock()
address := peerIdentifier.Identifier()
p, ok := t.addressToPeer[address]
if !ok {
var err error
p, err = newPeer(address, t)
if err != nil {
return nil, err
}
t.addressToPeer[address] = p
}
p.Subscribe(peerSubscriber)
return p, nil
}
// ReleasePeer releases the peer.
func (t *Transport) ReleasePeer(peerIdentifier peer.Identifier, peerSubscriber peer.Subscriber) error {
t.lock.Lock()
defer t.lock.Unlock()
address := peerIdentifier.Identifier()
p, ok := t.addressToPeer[address]
if !ok {
return peer.ErrTransportHasNoReferenceToPeer{
TransportName: "grpc.Transport",
PeerIdentifier: address,
}
}
if err := p.Unsubscribe(peerSubscriber); err != nil {
return err
}
if p.NumSubscribers() == 0 {
delete(t.addressToPeer, address)
p.stop()
return p.wait()
}
return nil
}
| 1 | 16,731 | nit: I'm not sure it can be addressed in this diff, but it seems odd that NewOutbound supports TLS but NewSingleOutbound does not. As a somewhat naive user I would expect the only difference between these two APIs is how peers are chosen. | yarpc-yarpc-go | go |
@@ -226,11 +226,17 @@ func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (tra
return nil, yarpcerrors.InvalidArgumentErrorf("request for http oneway outbound was nil")
}
- _, err := o.call(ctx, treq)
+ // res is used to close the response body to avoid memory/connection leak
+ // even when the response body is empty
+ res, err := o.call(ctx, treq)
if err != nil {
return nil, err
}
+ if err = res.Body.Close(); err != nil {
+ return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
+ }
+
return time.Now(), nil
}
| 1 | // Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
opentracinglog "github.com/opentracing/opentracing-go/log"
"go.uber.org/yarpc"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/api/x/introspection"
intyarpcerrors "go.uber.org/yarpc/internal/yarpcerrors"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.Namer = (*Outbound)(nil)
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) section of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// AddHeader specifies that an HTTP outbound should always include the given
// header in outgoung requests.
//
// httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN"))
//
// Note that headers starting with "Rpc-" are reserved by YARPC. This function
// will panic if the header starts with "Rpc-".
func AddHeader(key, value string) OutboundOption {
if strings.HasPrefix(strings.ToLower(key), "rpc-") {
panic(fmt.Errorf(
"invalid header name %q: "+
`headers starting with "Rpc-" are reserved by YARPC`, key))
}
return func(o *Outbound) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Add(key, value)
}
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: lifecycle.NewOnce(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
bothResponseError: true,
}
for _, opt := range opts {
opt(o)
}
o.sender = &transportSender{Client: t.client}
return o
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound that sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
sender sender
// Headers to add to all outgoing requests.
headers http.Header
once *lifecycle.Once
// should only be false in testing
bothResponseError bool
}
// TransportName is the transport name that will be set on `transport.Request` struct.
func (o *Outbound) TransportName() string {
return TransportName
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Chooser returns the outbound's peer chooser.
func (o *Outbound) Chooser() peer.Chooser {
return o.chooser
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes a HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http unary outbound was nil")
}
return o.call(ctx, treq)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http oneway outbound was nil")
}
_, err := o.call(ctx, treq)
if err != nil {
return nil, err
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
start := time.Now()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "missing context deadline")
}
ttl := deadline.Sub(start)
hreq, err := o.createRequest(treq)
if err != nil {
return nil, err
}
hreq.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, hreq, span, err := o.withOpentracingSpan(ctx, hreq, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
hreq = o.withCoreHeaders(hreq, treq, ttl)
hreq = hreq.WithContext(ctx)
response, err := o.roundTrip(hreq, treq, start, o.transport.client)
if err != nil {
span.SetTag("error", true)
span.LogFields(opentracinglog.String("event", err.Error()))
return nil, err
}
span.SetTag("http.status_code", response.StatusCode)
// Service name match validation, return yarpcerrors.CodeInternal error if not match
if match, resSvcName := checkServiceMatch(treq.Service, response.Header); !match {
return nil, transport.UpdateSpanWithErr(span,
yarpcerrors.InternalErrorf("service name sent from the request "+
"does not match the service name received in the response, sent %q, got: %q", treq.Service, resSvcName))
}
tres := &transport.Response{
Headers: applicationHeaders.FromHTTPHeaders(response.Header, transport.NewHeaders()),
Body: response.Body,
BodySize: int(response.ContentLength),
ApplicationError: response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus,
ApplicationErrorMeta: &transport.ApplicationErrorMeta{
Details: response.Header.Get(_applicationErrorDetailsHeader),
Name: response.Header.Get(_applicationErrorNameHeader),
Code: getYARPCApplicationErrorCode(response.Header.Get(_applicationErrorCodeHeader)),
},
}
bothResponseError := response.Header.Get(BothResponseErrorHeader) == AcceptTrue
if bothResponseError && o.bothResponseError {
if response.StatusCode >= 300 {
return getYARPCErrorFromResponse(tres, response, true)
}
return tres, nil
}
if response.StatusCode >= 200 && response.StatusCode < 300 {
return tres, nil
}
return getYARPCErrorFromResponse(tres, response, false)
}
func getYARPCApplicationErrorCode(code string) *yarpcerrors.Code {
if code == "" {
return nil
}
errorCode, err := strconv.Atoi(code)
if err != nil {
return nil
}
yarpcCode := yarpcerrors.Code(errorCode)
return &yarpcCode
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*httpPeer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*httpPeer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*httpPeer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
tags := opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
}
for k, v := range yarpc.OpentracingTags {
tags[k] = v
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
tags,
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
// Add default headers to all requests.
for k, vs := range o.headers {
for _, v := range vs {
req.Header.Add(k, v)
}
}
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
if o.bothResponseError {
req.Header.Set(AcceptsBothResponseErrorHeader, AcceptTrue)
}
return req
}
func getYARPCErrorFromResponse(tres *transport.Response, response *http.Response, bothResponseError bool) (*transport.Response, error) {
var contents string
var details []byte
if bothResponseError {
contents = response.Header.Get(ErrorMessageHeader)
if response.Header.Get(ErrorDetailsHeader) != "" {
// the contents of this header and the body should be the same, but
// use the contents in the body, in case the contents were not ASCII and
// the contents were not preserved in the header.
var err error
details, err = ioutil.ReadAll(response.Body)
if err != nil {
return tres, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
if err := response.Body.Close(); err != nil {
return tres, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
// nil out body so that it isn't read later
tres.Body = nil
}
} else {
contentsBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
contents = string(contentsBytes)
if err := response.Body.Close(); err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
}
// use the status code if we can't get a code from the headers
code := statusCodeToBestCode(response.StatusCode)
if errorCodeText := response.Header.Get(ErrorCodeHeader); errorCodeText != "" {
var errorCode yarpcerrors.Code
// TODO: what to do with error?
if err := errorCode.UnmarshalText([]byte(errorCodeText)); err == nil {
code = errorCode
}
}
yarpcErr := intyarpcerrors.NewWithNamef(
code,
response.Header.Get(ErrorNameHeader),
strings.TrimSuffix(contents, "\n"),
).WithDetails(details)
if bothResponseError {
return tres, yarpcErr
}
return nil, yarpcErr
}
// Only does verification if there is a response header
func checkServiceMatch(reqSvcName string, resHeaders http.Header) (bool, string) {
serviceName := resHeaders.Get(ServiceHeader)
return serviceName == "" || serviceName == reqSvcName, serviceName
}
// RoundTrip implements the http.RoundTripper interface, making a YARPC HTTP outbound suitable as a
// Transport when constructing an HTTP Client. An HTTP client is suitable only for relative paths to
// a single outbound service. The HTTP outbound overrides the host:port portion of the URL of the
// provided request.
//
// Sample usage:
//
// client := http.Client{Transport: outbound}
//
// Thereafter use the Golang standard library HTTP to send requests with this client.
//
// ctx, cancel := context.WithTimeout(context.Background(), time.Second)
// defer cancel()
// req, err := http.NewRequest("GET", "http://example.com/", nil /* body */)
// req = req.WithContext(ctx)
// res, err := client.Do(req)
//
// All requests must have a deadline on the context.
// The peer chooser for raw HTTP requests will receive a YARPC transport.Request with no body.
//
// OpenTracing information must be added manually, before this call, to support context propagation.
func (o *Outbound) RoundTrip(hreq *http.Request) (*http.Response, error) {
return o.roundTrip(hreq, nil /* treq */, time.Now(), o.sender)
}
func (o *Outbound) roundTrip(hreq *http.Request, treq *transport.Request, start time.Time, sender sender) (*http.Response, error) {
ctx := hreq.Context()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(
yarpcerrors.CodeInvalidArgument,
"missing context deadline")
}
ttl := deadline.Sub(start)
// When sending requests through the RoundTrip method, we construct the
// transport request from the HTTP headers as if it were an inbound
// request.
// The API for setting transport metadata for an outbound request when
	// using the go stdlib HTTP client is to use headers as the YARPC HTTP
// transport header conventions.
if treq == nil {
treq = &transport.Request{
Caller: hreq.Header.Get(CallerHeader),
Service: hreq.Header.Get(ServiceHeader),
Encoding: transport.Encoding(hreq.Header.Get(EncodingHeader)),
Procedure: hreq.Header.Get(ProcedureHeader),
ShardKey: hreq.Header.Get(ShardKeyHeader),
RoutingKey: hreq.Header.Get(RoutingKeyHeader),
RoutingDelegate: hreq.Header.Get(RoutingDelegateHeader),
Headers: applicationHeaders.FromHTTPHeaders(hreq.Header, transport.Headers{}),
}
}
if err := o.once.WaitUntilRunning(ctx); err != nil {
return nil, intyarpcerrors.AnnotateWithInfo(
yarpcerrors.FromError(err),
"error waiting for HTTP outbound to start for service: %s",
treq.Service)
}
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
hres, err := o.doWithPeer(ctx, hreq, treq, start, ttl, p, sender)
// Call the onFinish method before returning (with the error from call with peer)
onFinish(err)
return hres, err
}
func (o *Outbound) doWithPeer(
ctx context.Context,
hreq *http.Request,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *httpPeer,
sender sender,
) (*http.Response, error) {
hreq.URL.Host = p.HostPort()
response, err := sender.Do(hreq.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
if err == context.DeadlineExceeded {
// Note that the connection experienced a time out, which may
// indicate that the connection is half-open, that the destination
// died without sending a TCP FIN packet.
p.onSuspect()
end := time.Now()
return nil, yarpcerrors.Newf(
yarpcerrors.CodeDeadlineExceeded,
"client timeout for procedure %q of service %q after %v",
treq.Procedure, treq.Service, end.Sub(start))
}
// Note that the connection may have been lost so the peer connection
// maintenance loop resumes probing for availability.
p.onDisconnected()
return nil, yarpcerrors.Newf(yarpcerrors.CodeUnknown, "unknown error from http client: %s", err.Error())
}
return response, nil
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 19,439 | I would advise to enhance the test for the method `CallOneway` - we should have a test very similar to `TestCallSuccess`. We should test: - Success with response (even if it is callOneway) - Success with no response and empty payload - Errors | yarpc-yarpc-go | go |
@@ -8,7 +8,7 @@ import java.util.regex.Pattern;
public class EmailAddressValidator implements Validator {
private static final Pattern EMAIL_ADDRESS_PATTERN = Pattern.compile(
- "[a-zA-Z0-9\\+\\.\\_\\%\\-\\+]{1,256}" +
+ "[a-zA-Z0-9\\+\\.\\_\\%\\-]{1,256}" +
"\\@" +
"[a-zA-Z0-9][a-zA-Z0-9\\-]{0,64}" +
"(" + | 1 |
package com.fsck.k9;
import android.text.util.Rfc822Tokenizer;
import android.widget.AutoCompleteTextView.Validator;
import java.util.regex.Pattern;
public class EmailAddressValidator implements Validator {
private static final Pattern EMAIL_ADDRESS_PATTERN = Pattern.compile(
"[a-zA-Z0-9\\+\\.\\_\\%\\-\\+]{1,256}" +
"\\@" +
"[a-zA-Z0-9][a-zA-Z0-9\\-]{0,64}" +
"(" +
"\\." +
"[a-zA-Z0-9][a-zA-Z0-9\\-]{0,25}" +
")+"
);
public CharSequence fixText(CharSequence invalidText) {
return "";
}
public boolean isValid(CharSequence text) {
return Rfc822Tokenizer.tokenize(text).length > 0;
}
public boolean isValidAddressOnly(CharSequence text) {
return EMAIL_ADDRESS_PATTERN.matcher(text).matches();
}
}
| 1 | 15,602 | We're already matching +. Not sure why this changed. | k9mail-k-9 | java |
@@ -34,6 +34,14 @@ class Configuration implements ConfigurationInterface
->children()
->scalarNode('db_driver')->isRequired()->end()
->scalarNode('default_context')->isRequired()->end()
+ ->scalarNode('category_manager')
+ ->info('if sonata-project/classification exists will set "sonata.classification.manager.category" | if you want to define your own category manager you need to implement \Sonata\MediaBundle\Model\CategoryManagerInterface and set the service name here')
+ ->defaultValue(null)
+ ->end()
+ ->scalarNode('force_disable_category')
+ ->info('true IF you really want to disable the relation with category')
+ ->defaultValue(false)
+ ->end()
->end()
;
| 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\DependencyInjection;
use Symfony\Component\Config\Definition\Builder\ArrayNodeDefinition;
use Symfony\Component\Config\Definition\Builder\TreeBuilder;
use Symfony\Component\Config\Definition\ConfigurationInterface;
/**
* This is the class that validates and merges configuration from your app/config files.
*
* To learn more see {@link http://symfony.com/doc/current/cookbook/bundles/extension.html#cookbook-bundles-extension-config-class}
*/
class Configuration implements ConfigurationInterface
{
/**
* {@inheritdoc}
*/
public function getConfigTreeBuilder()
{
$treeBuilder = new TreeBuilder();
$node = $treeBuilder->root('sonata_media');
$node
->children()
->scalarNode('db_driver')->isRequired()->end()
->scalarNode('default_context')->isRequired()->end()
->end()
;
$this->addContextsSection($node);
$this->addCdnSection($node);
$this->addFilesystemSection($node);
$this->addProvidersSection($node);
$this->addExtraSection($node);
$this->addModelSection($node);
$this->addBuzzSection($node);
$this->addResizerSection($node);
return $treeBuilder;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addContextsSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('contexts')
->useAttributeAsKey('id')
->prototype('array')
->children()
->arrayNode('download')
->addDefaultsIfNotSet()
->children()
->scalarNode('strategy')->defaultValue('sonata.media.security.superadmin_strategy')->end()
->scalarNode('mode')->defaultValue('http')->end()
->end()
->end()
->arrayNode('providers')
->prototype('scalar')
->defaultValue(array())
->end()
->end()
->arrayNode('formats')
->isRequired()
->useAttributeAsKey('id')
->prototype('array')
->children()
->scalarNode('width')->defaultValue(false)->end()
->scalarNode('height')->defaultValue(false)->end()
->scalarNode('quality')->defaultValue(80)->end()
->scalarNode('format')->defaultValue('jpg')->end()
->scalarNode('constraint')->defaultValue(true)->end()
->end()
->end()
->end()
->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addCdnSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('cdn')
->children()
->arrayNode('server')
->addDefaultsIfNotSet()
->children()
->scalarNode('path')->defaultValue('/uploads/media')->end()
->end()
->end()
->arrayNode('panther')
->children()
->scalarNode('path')
->info('e.g. http://domain.pantherportal.com/uploads/media')
->isRequired()
->end()
->scalarNode('site_id')->isRequired()->end()
->scalarNode('password')->isRequired()->end()
->scalarNode('username')->isRequired()->end()
->end()
->end()
->arrayNode('cloudfront')
->children()
->scalarNode('path')
->info('e.g. http://xxxxxxxxxxxxxx.cloudfront.net/uploads/media')
->isRequired()
->end()
->scalarNode('distribution_id')->isRequired()->end()
->scalarNode('key')->isRequired()->end()
->scalarNode('secret')->isRequired()->end()
->end()
->end()
->arrayNode('fallback')
->children()
->scalarNode('master')->isRequired()->end()
->scalarNode('fallback')->isRequired()->end()
->end()
->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addFilesystemSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('filesystem')
->children()
->arrayNode('local')
->addDefaultsIfNotSet()
->children()
->scalarNode('directory')->defaultValue('%kernel.root_dir%/../web/uploads/media')->end()
->scalarNode('create')->defaultValue(false)->end()
->end()
->end()
->arrayNode('ftp')
->children()
->scalarNode('directory')->isRequired()->end()
->scalarNode('host')->isRequired()->end()
->scalarNode('username')->isRequired()->end()
->scalarNode('password')->isRequired()->end()
->scalarNode('port')->defaultValue(21)->end()
->scalarNode('passive')->defaultValue(false)->end()
->scalarNode('create')->defaultValue(false)->end()
->scalarNode('mode')->defaultValue(defined('FTP_BINARY') ? FTP_BINARY : false)->end()
->end()
->end()
->arrayNode('s3')
->children()
->scalarNode('directory')->defaultValue('')->end()
->scalarNode('bucket')->isRequired()->end()
->scalarNode('accessKey')->isRequired()->end()
->scalarNode('secretKey')->isRequired()->end()
->scalarNode('create')->defaultValue(false)->end()
->scalarNode('storage')
->defaultValue('standard')
->validate()
->ifNotInArray(array('standard', 'reduced'))
->thenInvalid('Invalid storage type - "%s"')
->end()
->end()
->scalarNode('cache_control')->defaultValue('')->end()
->scalarNode('acl')
->defaultValue('public')
->validate()
->ifNotInArray(array('private', 'public', 'open', 'auth_read', 'owner_read', 'owner_full_control'))
->thenInvalid('Invalid acl permission - "%s"')
->end()
->end()
->scalarNode('encryption')
->defaultValue('')
->validate()
->ifNotInArray(array('aes256'))
->thenInvalid('Invalid encryption type - "%s"')
->end()
->end()
->scalarNode('region')->defaultValue('s3.amazonaws.com')->end()
->arrayNode('meta')
->useAttributeAsKey('name')
->prototype('scalar')
->end()
->end()
->end()
->end()
->arrayNode('mogilefs')
->children()
->scalarNode('domain')->isRequired()->end()
->arrayNode('hosts')
->prototype('scalar')->end()
->isRequired()
->end()
->end()
->end()
->arrayNode('replicate')
->children()
->scalarNode('master')->isRequired()->end()
->scalarNode('slave')->isRequired()->end()
->end()
->end()
->arrayNode('openstack')
->children()
->scalarNode('url')->isRequired()->end()
->arrayNode('secret')
->children()
->scalarNode('username')->isRequired()->end()
->scalarNode('password')->isRequired()->end()
->end()
->end()
->scalarNode('region')->end()
->scalarNode('containerName')->defaultValue('media')->end()
->scalarNode('create_container')->defaultValue(false)->end()
->end()
->end()
->arrayNode('rackspace')
->children()
->scalarNode('url')->isRequired()->end()
->arrayNode('secret')
->children()
->scalarNode('username')->isRequired()->end()
->scalarNode('apiKey')->isRequired()->end()
->end()
->end()
->scalarNode('region')->isRequired()->end()
->scalarNode('containerName')->defaultValue('media')->end()
->scalarNode('create_container')->defaultValue(false)->end()
->end()
->end()
->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addProvidersSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('providers')
->addDefaultsIfNotSet()
->children()
->arrayNode('file')
->addDefaultsIfNotSet()
->children()
->scalarNode('service')->defaultValue('sonata.media.provider.file')->end()
->scalarNode('resizer')->defaultValue(false)->end()
->scalarNode('filesystem')->defaultValue('sonata.media.filesystem.local')->end()
->scalarNode('cdn')->defaultValue('sonata.media.cdn.server')->end()
->scalarNode('generator')->defaultValue('sonata.media.generator.default')->end()
->scalarNode('thumbnail')->defaultValue('sonata.media.thumbnail.format')->end()
->arrayNode('allowed_extensions')
->prototype('scalar')->end()
->defaultValue(array(
'pdf', 'txt', 'rtf',
'doc', 'docx', 'xls', 'xlsx', 'ppt', 'pptx',
'odt', 'odg', 'odp', 'ods', 'odc', 'odf', 'odb',
'csv',
'xml',
))
->end()
->arrayNode('allowed_mime_types')
->prototype('scalar')->end()
->defaultValue(array(
'application/pdf', 'application/x-pdf', 'application/rtf', 'text/html', 'text/rtf', 'text/plain',
'application/excel', 'application/msword', 'application/vnd.ms-excel', 'application/vnd.ms-powerpoint',
'application/vnd.ms-powerpoint', 'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.graphics', 'application/vnd.oasis.opendocument.presentation', 'application/vnd.oasis.opendocument.spreadsheet', 'application/vnd.oasis.opendocument.chart', 'application/vnd.oasis.opendocument.formula', 'application/vnd.oasis.opendocument.database', 'application/vnd.oasis.opendocument.image',
'text/comma-separated-values',
'text/xml', 'application/xml',
'application/zip', // seems to be used for xlsx document ...
))
->end()
->end()
->end()
->arrayNode('image')
->addDefaultsIfNotSet()
->children()
->scalarNode('service')->defaultValue('sonata.media.provider.image')->end()
->scalarNode('resizer')->defaultValue('sonata.media.resizer.simple')->end()
->scalarNode('filesystem')->defaultValue('sonata.media.filesystem.local')->end()
->scalarNode('cdn')->defaultValue('sonata.media.cdn.server')->end()
->scalarNode('generator')->defaultValue('sonata.media.generator.default')->end()
->scalarNode('thumbnail')->defaultValue('sonata.media.thumbnail.format')->end()
->scalarNode('adapter')->defaultValue('sonata.media.adapter.image.gd')->end()
->arrayNode('allowed_extensions')
->prototype('scalar')->end()
->defaultValue(array('jpg', 'png', 'jpeg'))
->end()
->arrayNode('allowed_mime_types')
->prototype('scalar')->end()
->defaultValue(array(
'image/pjpeg',
'image/jpeg',
'image/png',
'image/x-png',
))
->end()
->end()
->end()
->arrayNode('youtube')
->addDefaultsIfNotSet()
->children()
->scalarNode('service')->defaultValue('sonata.media.provider.youtube')->end()
->scalarNode('resizer')->defaultValue('sonata.media.resizer.simple')->end()
->scalarNode('filesystem')->defaultValue('sonata.media.filesystem.local')->end()
->scalarNode('cdn')->defaultValue('sonata.media.cdn.server')->end()
->scalarNode('generator')->defaultValue('sonata.media.generator.default')->end()
->scalarNode('thumbnail')->defaultValue('sonata.media.thumbnail.format')->end()
->scalarNode('html5')->defaultValue(false)->end()
->end()
->end()
->arrayNode('dailymotion')
->addDefaultsIfNotSet()
->children()
->scalarNode('service')->defaultValue('sonata.media.provider.dailymotion')->end()
->scalarNode('resizer')->defaultValue('sonata.media.resizer.simple')->end()
->scalarNode('filesystem')->defaultValue('sonata.media.filesystem.local')->end()
->scalarNode('cdn')->defaultValue('sonata.media.cdn.server')->end()
->scalarNode('generator')->defaultValue('sonata.media.generator.default')->end()
->scalarNode('thumbnail')->defaultValue('sonata.media.thumbnail.format')->end()
->end()
->end()
->arrayNode('vimeo')
->addDefaultsIfNotSet()
->children()
->scalarNode('service')->defaultValue('sonata.media.provider.vimeo')->end()
->scalarNode('resizer')->defaultValue('sonata.media.resizer.simple')->end()
->scalarNode('filesystem')->defaultValue('sonata.media.filesystem.local')->end()
->scalarNode('cdn')->defaultValue('sonata.media.cdn.server')->end()
->scalarNode('generator')->defaultValue('sonata.media.generator.default')->end()
->scalarNode('thumbnail')->defaultValue('sonata.media.thumbnail.format')->end()
->end()
->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addExtraSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('pixlr')
->info('More info at https://pixlr.com/')
->addDefaultsIfNotSet()
->children()
->scalarNode('enabled')->defaultValue(false)->end()
->scalarNode('secret')->defaultValue(sha1(uniqid(rand(1, 9999), true)))->end()
->scalarNode('referrer')->defaultValue('Sonata Media')->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addModelSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('class')
->addDefaultsIfNotSet()
->children()
->scalarNode('media')->defaultValue('Application\\Sonata\\MediaBundle\\Entity\\Media')->end()
->scalarNode('gallery')->defaultValue('Application\\Sonata\\MediaBundle\\Entity\\Gallery')->end()
->scalarNode('gallery_has_media')->defaultValue('Application\\Sonata\\MediaBundle\\Entity\\GalleryHasMedia')->end()
->scalarNode('category')->defaultValue('Application\\Sonata\\ClassificationBundle\\Entity\\Category')->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addBuzzSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('buzz')
->addDefaultsIfNotSet()
->children()
->scalarNode('connector')->defaultValue('sonata.media.buzz.connector.curl')->end()
->arrayNode('client')
->addDefaultsIfNotSet()
->children()
->booleanNode('ignore_errors')->defaultValue(true)->end()
->scalarNode('max_redirects')->defaultValue(5)->end()
->scalarNode('timeout')->defaultValue(5)->end()
->booleanNode('verify_peer')->defaultValue(true)->end()
->scalarNode('proxy')->defaultNull()->end()
->end()
->end()
->end()
->end()
;
}
/**
* @param ArrayNodeDefinition $node
*/
private function addResizerSection(ArrayNodeDefinition $node)
{
$node
->children()
->arrayNode('resizer')
->addDefaultsIfNotSet()
->children()
->arrayNode('simple')
->addDefaultsIfNotSet()
->children()
->scalarNode('mode')->defaultValue('inset')->end()
->end()
->end()
->arrayNode('square')
->addDefaultsIfNotSet()
->children()
->scalarNode('mode')->defaultValue('inset')->end()
->end()
->end()
->end()
->end()
->end()
;
}
}
| 1 | 6,915 | Please add line breaks, so the line doesn't exceed 80 chars. | sonata-project-SonataMediaBundle | php |
@@ -29,11 +29,10 @@ namespace OpenTelemetry.Instrumentation.AspNetCore
/// <summary>
/// Initializes a new instance of the <see cref="AspNetCoreInstrumentation"/> class.
/// </summary>
- /// <param name="activitySource">ActivitySource adapter instance.</param>
/// <param name="options">Configuration options for ASP.NET Core instrumentation.</param>
- public AspNetCoreInstrumentation(ActivitySourceAdapter activitySource, AspNetCoreInstrumentationOptions options)
+ public AspNetCoreInstrumentation(AspNetCoreInstrumentationOptions options)
{
- this.diagnosticSourceSubscriber = new DiagnosticSourceSubscriber(new HttpInListener("Microsoft.AspNetCore", options, activitySource), null);
+ this.diagnosticSourceSubscriber = new DiagnosticSourceSubscriber(new HttpInListener("Microsoft.AspNetCore", options), null);
this.diagnosticSourceSubscriber.Subscribe();
}
| 1 | // <copyright file="AspNetCoreInstrumentation.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using OpenTelemetry.Instrumentation.AspNetCore.Implementation;
using OpenTelemetry.Trace;
namespace OpenTelemetry.Instrumentation.AspNetCore
{
/// <summary>
/// Asp.Net Core Requests instrumentation.
/// </summary>
internal class AspNetCoreInstrumentation : IDisposable
{
private readonly DiagnosticSourceSubscriber diagnosticSourceSubscriber;
/// <summary>
/// Initializes a new instance of the <see cref="AspNetCoreInstrumentation"/> class.
/// </summary>
/// <param name="activitySource">ActivitySource adapter instance.</param>
/// <param name="options">Configuration options for ASP.NET Core instrumentation.</param>
public AspNetCoreInstrumentation(ActivitySourceAdapter activitySource, AspNetCoreInstrumentationOptions options)
{
this.diagnosticSourceSubscriber = new DiagnosticSourceSubscriber(new HttpInListener("Microsoft.AspNetCore", options, activitySource), null);
this.diagnosticSourceSubscriber.Subscribe();
}
/// <inheritdoc/>
public void Dispose()
{
this.diagnosticSourceSubscriber?.Dispose();
}
}
}
| 1 | 19,274 | I initially thought (incorrectly) this was a breaking change! The public api analyzer is a gift! | open-telemetry-opentelemetry-dotnet | .cs
@@ -243,7 +243,7 @@ _ostree_delta_compute_similar_objects (OstreeRepo *repo,
{
gboolean ret = FALSE;
g_autoptr(GHashTable) ret_modified_regfile_content =
- g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)g_ptr_array_unref);
+ g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
g_autoptr(GPtrArray) from_sizes = NULL;
g_autoptr(GPtrArray) to_sizes = NULL;
guint i, j; | 1 | /* -*- mode: C; c-file-style: "gnu"; indent-tabs-mode: nil; -*-
*
* Copyright (C) 2015 Colin Walters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <string.h>
#include <gio/gunixoutputstream.h>
#include "ostree-core-private.h"
#include "ostree-repo-private.h"
#include "ostree-lzma-compressor.h"
#include "ostree-repo-static-delta-private.h"
#include "ostree-diff.h"
#include "ostree-rollsum.h"
#include "otutil.h"
#include "ostree-varint.h"
void
_ostree_delta_content_sizenames_free (gpointer v)
{
OstreeDeltaContentSizeNames *ce = v;
g_free (ce->checksum);
g_ptr_array_unref (ce->basenames);
g_free (ce);
}
static gboolean
build_content_sizenames_recurse (OstreeRepo *repo,
OstreeRepoCommitTraverseIter *iter,
GHashTable *sizenames_map,
GHashTable *include_only_objects,
GCancellable *cancellable,
GError **error)
{
gboolean ret = FALSE;
while (TRUE)
{
OstreeRepoCommitIterResult iterres =
ostree_repo_commit_traverse_iter_next (iter, cancellable, error);
if (iterres == OSTREE_REPO_COMMIT_ITER_RESULT_ERROR)
goto out;
else if (iterres == OSTREE_REPO_COMMIT_ITER_RESULT_END)
break;
else if (iterres == OSTREE_REPO_COMMIT_ITER_RESULT_FILE)
{
char *name;
char *checksum;
OstreeDeltaContentSizeNames *csizenames;
ostree_repo_commit_traverse_iter_get_file (iter, &name, &checksum);
if (include_only_objects && !g_hash_table_contains (include_only_objects, checksum))
continue;
csizenames = g_hash_table_lookup (sizenames_map, checksum);
if (!csizenames)
{
g_autoptr(GFileInfo) finfo = NULL;
if (!ostree_repo_load_file (repo, checksum,
NULL, &finfo, NULL,
cancellable, error))
goto out;
if (g_file_info_get_file_type (finfo) != G_FILE_TYPE_REGULAR)
continue;
csizenames = g_new0 (OstreeDeltaContentSizeNames, 1);
csizenames->checksum = g_strdup (checksum);
csizenames->size = g_file_info_get_size (finfo);
g_hash_table_replace (sizenames_map, csizenames->checksum, csizenames);
}
if (!csizenames->basenames)
csizenames->basenames = g_ptr_array_new_with_free_func (g_free);
g_ptr_array_add (csizenames->basenames, g_strdup (name));
}
else if (iterres == OSTREE_REPO_COMMIT_ITER_RESULT_DIR)
{
char *name;
char *content_checksum;
char *meta_checksum;
g_autoptr(GVariant) dirtree = NULL;
ostree_cleanup_repo_commit_traverse_iter
OstreeRepoCommitTraverseIter subiter = { 0, };
ostree_repo_commit_traverse_iter_get_dir (iter, &name, &content_checksum, &meta_checksum);
if (!ostree_repo_load_variant (repo, OSTREE_OBJECT_TYPE_DIR_TREE,
content_checksum, &dirtree,
error))
goto out;
if (!ostree_repo_commit_traverse_iter_init_dirtree (&subiter, repo, dirtree,
OSTREE_REPO_COMMIT_TRAVERSE_FLAG_NONE,
error))
goto out;
if (!build_content_sizenames_recurse (repo, &subiter,
sizenames_map, include_only_objects,
cancellable, error))
goto out;
}
else
g_assert_not_reached ();
}
ret = TRUE;
out:
return ret;
}
static int
compare_sizenames (const void *a,
const void *b)
{
OstreeDeltaContentSizeNames *sn_a = *(OstreeDeltaContentSizeNames**)(void*)a;
OstreeDeltaContentSizeNames *sn_b = *(OstreeDeltaContentSizeNames**)(void*)b;
return sn_a->size - sn_b->size;
}
/*
* Generate a sorted array of [(checksum: str, size: uint64, names: array[string]), ...]
* for regular file content.
*/
static gboolean
build_content_sizenames_filtered (OstreeRepo *repo,
GVariant *commit,
GHashTable *include_only_objects,
GPtrArray **out_sizenames,
GCancellable *cancellable,
GError **error)
{
gboolean ret = FALSE;
g_autoptr(GPtrArray) ret_sizenames =
g_ptr_array_new_with_free_func (_ostree_delta_content_sizenames_free);
g_autoptr(GHashTable) sizenames_map =
g_hash_table_new_full (g_str_hash, g_str_equal, NULL, _ostree_delta_content_sizenames_free);
ostree_cleanup_repo_commit_traverse_iter
OstreeRepoCommitTraverseIter iter = { 0, };
if (!ostree_repo_commit_traverse_iter_init_commit (&iter, repo, commit,
OSTREE_REPO_COMMIT_TRAVERSE_FLAG_NONE,
error))
goto out;
if (!build_content_sizenames_recurse (repo, &iter, sizenames_map, include_only_objects,
cancellable, error))
goto out;
{ GHashTableIter hashiter;
gpointer hkey, hvalue;
g_hash_table_iter_init (&hashiter, sizenames_map);
while (g_hash_table_iter_next (&hashiter, &hkey, &hvalue))
{
g_hash_table_iter_steal (&hashiter);
g_ptr_array_add (ret_sizenames, hvalue);
}
}
g_ptr_array_sort (ret_sizenames, compare_sizenames);
ret = TRUE;
if (out_sizenames)
*out_sizenames = g_steal_pointer (&ret_sizenames);
out:
return ret;
}
static gboolean
string_array_nonempty_intersection (GPtrArray *a,
GPtrArray *b,
gboolean fuzzy)
{
guint i;
for (i = 0; i < a->len; i++)
{
guint j;
const char *a_str = a->pdata[i];
const char *a_dot = strchr (a_str, '.');
for (j = 0; j < b->len; j++)
{
const char *b_str = b->pdata[j];
const char *b_dot = strchr (b_str, '.');
/* When doing fuzzy comparison, just compare the part before the '.' if it exists. */
if (fuzzy && a_dot && b_dot && b_dot - b_str && b_dot - b_str == a_dot - a_str)
{
if (strncmp (a_str, b_str, a_dot - a_str) == 0)
return TRUE;
}
else
{
if (strcmp (a_str, b_str) == 0)
return TRUE;
}
}
}
return FALSE;
}
/*
* Build up a map of files with matching basenames and similar size,
* and use it to find apparently similar objects.
*
* @new_reachable_regfile_content is a Set<checksum> of new regular
* file objects.
*
* Currently, @out_modified_regfile_content will be a Map<to checksum,from checksum>;
* however in the future it would be easy to have this function return
* multiple candidate matches. The hard part would be changing
* the delta compiler to iterate over all matches, determine
* a cost for each one, then pick the best.
*/
gboolean
_ostree_delta_compute_similar_objects (OstreeRepo *repo,
GVariant *from_commit,
GVariant *to_commit,
GHashTable *new_reachable_regfile_content,
guint similarity_percent_threshold,
GHashTable **out_modified_regfile_content,
GCancellable *cancellable,
GError **error)
{
gboolean ret = FALSE;
g_autoptr(GHashTable) ret_modified_regfile_content =
g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)g_ptr_array_unref);
g_autoptr(GPtrArray) from_sizes = NULL;
g_autoptr(GPtrArray) to_sizes = NULL;
guint i, j;
guint lower;
guint upper;
if (!build_content_sizenames_filtered (repo, from_commit, NULL,
&from_sizes,
cancellable, error))
goto out;
if (!build_content_sizenames_filtered (repo, to_commit, new_reachable_regfile_content,
&to_sizes,
cancellable, error))
goto out;
/* Iterate over all newly added objects, find objects which have
* similar basename and sizes.
*
* Because the arrays are sorted by size, we can maintain a `lower`
* bound on the original (from) objects to start searching.
*/
lower = 0;
upper = from_sizes->len;
for (i = 0; i < to_sizes->len; i++)
{
int fuzzy;
gboolean found = FALSE;
OstreeDeltaContentSizeNames *to_sizenames = to_sizes->pdata[i];
const guint64 min_threshold = to_sizenames->size *
(1.0-similarity_percent_threshold/100.0);
const guint64 max_threshold = to_sizenames->size *
(1.0+similarity_percent_threshold/100.0);
/* Don't build candidates for the empty object */
if (to_sizenames->size == 0)
continue;
for (fuzzy = 0; fuzzy < 2 && !found; fuzzy++)
{
for (j = lower; j < upper; j++)
{
OstreeDeltaContentSizeNames *from_sizenames = from_sizes->pdata[j];
/* Don't build candidates for the empty object */
if (from_sizenames->size == 0)
{
continue;
}
if (from_sizenames->size < min_threshold)
{
lower++;
continue;
}
if (from_sizenames->size > max_threshold)
break;
if (!string_array_nonempty_intersection (from_sizenames->basenames,
to_sizenames->basenames,
fuzzy == 1))
{
continue;
}
/* Only one candidate right now */
g_hash_table_insert (ret_modified_regfile_content,
g_strdup (to_sizenames->checksum),
g_strdup (from_sizenames->checksum));
found = TRUE;
break;
}
}
}
ret = TRUE;
if (out_modified_regfile_content)
*out_modified_regfile_content = g_steal_pointer (&ret_modified_regfile_content);
out:
return ret;
}
| 1 | 9,056 | I'm not sure how this one didn't segfault before. | ostreedev-ostree | c |
@@ -27,6 +27,13 @@ import (
)
var _ = Describe("Endpoints", func() {
+ const (
+ ProtoUDP = 17
+ ProtoIPIP = 4
+ VXLANPort = 0
+ VXLANVNI = 0
+ )
+
for _, trueOrFalse := range []bool{true, false} {
kubeIPVSEnabled := trueOrFalse
var rrConfigNormalMangleReturn = Config{ | 1 | // Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules_test
import (
"strings"
. "github.com/projectcalico/felix/rules"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/projectcalico/felix/ipsets"
. "github.com/projectcalico/felix/iptables"
)
var _ = Describe("Endpoints", func() {
for _, trueOrFalse := range []bool{true, false} {
kubeIPVSEnabled := trueOrFalse
var rrConfigNormalMangleReturn = Config{
IPIPEnabled: true,
IPIPTunnelAddress: nil,
IPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, "cali", nil, nil),
IPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, "cali", nil, nil),
IptablesMarkAccept: 0x8,
IptablesMarkPass: 0x10,
IptablesMarkScratch0: 0x20,
IptablesMarkScratch1: 0x40,
IptablesMarkEndpoint: 0xff00,
IptablesMarkNonCaliEndpoint: 0x0100,
KubeIPVSSupportEnabled: kubeIPVSEnabled,
IptablesMangleAllowAction: "RETURN",
}
var rrConfigConntrackDisabledReturnAction = Config{
IPIPEnabled: true,
IPIPTunnelAddress: nil,
IPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, "cali", nil, nil),
IPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, "cali", nil, nil),
IptablesMarkAccept: 0x8,
IptablesMarkPass: 0x10,
IptablesMarkScratch0: 0x20,
IptablesMarkScratch1: 0x40,
IptablesMarkEndpoint: 0xff00,
IptablesMarkNonCaliEndpoint: 0x0100,
KubeIPVSSupportEnabled: kubeIPVSEnabled,
DisableConntrackInvalid: true,
IptablesFilterAllowAction: "RETURN",
}
var renderer RuleRenderer
var epMarkMapper EndpointMarkMapper
Context("with normal config", func() {
BeforeEach(func() {
renderer = NewRenderer(rrConfigNormalMangleReturn)
epMarkMapper = NewEndpointMarkMapper(rrConfigNormalMangleReturn.IptablesMarkEndpoint,
rrConfigNormalMangleReturn.IptablesMarkNonCaliEndpoint)
})
It("Song should render a minimal workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234", epMarkMapper,
true,
nil,
nil,
nil)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a disabled workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234", epMarkMapper,
false,
nil,
nil,
nil,
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
{Action: DropAction{},
Comment: "Endpoint admin disabled"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
{Action: DropAction{},
Comment: "Endpoint admin disabled"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a fully-loaded workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234",
epMarkMapper,
true,
[]string{"ai", "bi"},
[]string{"ae", "be"},
[]string{"prof1", "prof2"},
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-ai"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pri-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pri-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-ae"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-be"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pro-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pro-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a host endpoint", func() {
Expect(renderer.HostEndpointToFilterChains("eth0",
epMarkMapper,
[]string{"ai", "bi"}, []string{"ae", "be"},
[]string{"afi", "bfi"}, []string{"afe", "bfe"},
[]string{"prof1", "prof2"})).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-th-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-out"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-ae"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-be"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pro-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pro-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-ai"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pri-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pri-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-thfw-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-afe"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-bfe"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
},
},
{
Name: "cali-fhfw-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-afi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bfi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
},
},
{
Name: "cali-sm-eth0",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xa200, Mask: 0xff00}},
},
},
})))
})
It("should render host endpoint raw chains with untracked policies", func() {
Expect(renderer.HostEndpointToRawChains("eth0", []string{"c"}, []string{"c"})).To(Equal([]*Chain{
{
Name: "cali-th-eth0",
Rules: []Rule{
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-out"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-c"}},
// Extra NOTRACK action before returning in raw table.
{Match: Match().MarkSingleBitSet(0x8),
Action: NoTrackAction{}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
{
Name: "cali-fh-eth0",
Rules: []Rule{
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
// Extra NOTRACK action before returning in raw table.
{Match: Match().MarkSingleBitSet(0x8),
Action: NoTrackAction{}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
It("should render host endpoint mangle chains with pre-DNAT policies", func() {
Expect(renderer.HostEndpointToMangleChains(
"eth0",
[]string{"c"},
)).To(Equal([]*Chain{
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
})
Describe("with ctstate=INVALID disabled", func() {
BeforeEach(func() {
renderer = NewRenderer(rrConfigConntrackDisabledReturnAction)
epMarkMapper = NewEndpointMarkMapper(rrConfigConntrackDisabledReturnAction.IptablesMarkEndpoint,
rrConfigConntrackDisabledReturnAction.IptablesMarkNonCaliEndpoint)
})
It("should render a minimal workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234",
epMarkMapper,
true,
nil,
nil,
nil,
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render host endpoint mangle chains with pre-DNAT policies", func() {
Expect(renderer.HostEndpointToMangleChains(
"eth0",
[]string{"c"},
)).To(Equal([]*Chain{
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
})
}
})
func trimSMChain(ipvsEnable bool, chains []*Chain) []*Chain {
result := []*Chain{}
for _, chain := range chains {
if !ipvsEnable && strings.HasPrefix(chain.Name, "cali-sm") {
continue
}
result = append(result, chain)
}
return result
}
| 1 | 16,990 | Same points as in other test file. | projectcalico-felix | c |
@@ -16,14 +16,7 @@
*/
package org.apache.lucene.analysis.hunspell;
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.LineNumberReader;
-import java.io.OutputStream;
+import java.io.*;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.LineNumberReader;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.OfflineSorter;
import org.apache.lucene.util.OfflineSorter.ByteSequencesReader;
import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.RegExp;
import org.apache.lucene.util.fst.CharSequenceOutputs;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.FSTCompiler;
import org.apache.lucene.util.fst.IntSequenceOutputs;
import org.apache.lucene.util.fst.Outputs;
import org.apache.lucene.util.fst.Util;
/** In-memory structure for the dictionary (.dic) and affix (.aff) data of a hunspell dictionary. */
public class Dictionary {
static final char[] NOFLAGS = new char[0];
private static final String ALIAS_KEY = "AF";
private static final String MORPH_ALIAS_KEY = "AM";
private static final String PREFIX_KEY = "PFX";
private static final String SUFFIX_KEY = "SFX";
private static final String FLAG_KEY = "FLAG";
private static final String COMPLEXPREFIXES_KEY = "COMPLEXPREFIXES";
private static final String CIRCUMFIX_KEY = "CIRCUMFIX";
private static final String IGNORE_KEY = "IGNORE";
private static final String ICONV_KEY = "ICONV";
private static final String OCONV_KEY = "OCONV";
private static final String FULLSTRIP_KEY = "FULLSTRIP";
private static final String LANG_KEY = "LANG";
private static final String KEEPCASE_KEY = "KEEPCASE";
private static final String NEEDAFFIX_KEY = "NEEDAFFIX";
private static final String PSEUDOROOT_KEY = "PSEUDOROOT";
private static final String ONLYINCOMPOUND_KEY = "ONLYINCOMPOUND";
private static final String NUM_FLAG_TYPE = "num";
private static final String UTF8_FLAG_TYPE = "UTF-8";
private static final String LONG_FLAG_TYPE = "long";
// TODO: really for suffixes we should reverse the automaton and run them backwards
private static final String PREFIX_CONDITION_REGEX_PATTERN = "%s.*";
private static final String SUFFIX_CONDITION_REGEX_PATTERN = ".*%s";
FST<IntsRef> prefixes;
FST<IntsRef> suffixes;
// all condition checks used by prefixes and suffixes. these are typically re-used across
// many affix stripping rules. so these are deduplicated, to save RAM.
ArrayList<CharacterRunAutomaton> patterns = new ArrayList<>();
// the entries in the .dic file, mapping to their set of flags.
// the fst output is the ordinal list for flagLookup
FST<IntsRef> words;
// the list of unique flagsets (wordforms). theoretically huge, but practically
// small (e.g. for polish this is 756), otherwise humans wouldn't be able to deal with it either.
BytesRefHash flagLookup = new BytesRefHash();
// the list of unique strip affixes.
char[] stripData;
int[] stripOffsets;
// 8 bytes per affix
byte[] affixData = new byte[64];
private int currentAffix = 0;
// Default flag parsing strategy
private FlagParsingStrategy flagParsingStrategy = new SimpleFlagParsingStrategy();
// AF entries
private String[] aliases;
private int aliasCount = 0;
// AM entries
private String[] morphAliases;
private int morphAliasCount = 0;
// st: morphological entries (either directly, or aliased from AM)
private String[] stemExceptions = new String[8];
private int stemExceptionCount = 0;
// we set this during sorting, so we know to add an extra FST output.
// when set, some words have exceptional stems, and the last entry is a pointer to stemExceptions
boolean hasStemExceptions;
private final Path tempPath = getDefaultTempDir(); // TODO: make this configurable?
boolean ignoreCase;
boolean complexPrefixes;
// if no affixes have continuation classes, no need to do 2-level affix stripping
boolean twoStageAffix;
int circumfix = -1; // circumfix flag, or -1 if one is not defined
int keepcase = -1; // keepcase flag, or -1 if one is not defined
int needaffix = -1; // needaffix flag, or -1 if one is not defined
int onlyincompound = -1; // onlyincompound flag, or -1 if one is not defined
// ignored characters (dictionary, affix, inputs)
private char[] ignore;
// FSTs used for ICONV/OCONV, output ord pointing to replacement text
FST<CharsRef> iconv;
FST<CharsRef> oconv;
boolean needsInputCleaning;
boolean needsOutputCleaning;
// true if we can strip suffixes "down to nothing"
boolean fullStrip;
// language declaration of the dictionary
String language;
// true if case algorithms should use alternate (Turkish/Azeri) mapping
boolean alternateCasing;
/**
* Creates a new Dictionary containing the information read from the provided InputStreams to
* hunspell affix and dictionary files. You have to close the provided InputStreams yourself.
*
* @param tempDir Directory to use for offline sorting
* @param tempFileNamePrefix prefix to use to generate temp file names
* @param affix InputStream for reading the hunspell affix file (won't be closed).
* @param dictionary InputStream for reading the hunspell dictionary file (won't be closed).
* @throws IOException Can be thrown while reading from the InputStreams
* @throws ParseException Can be thrown if the content of the files does not meet expected formats
*/
public Dictionary(
Directory tempDir, String tempFileNamePrefix, InputStream affix, InputStream dictionary)
throws IOException, ParseException {
this(tempDir, tempFileNamePrefix, affix, Collections.singletonList(dictionary), false);
}
/**
* Creates a new Dictionary containing the information read from the provided InputStreams to
* hunspell affix and dictionary files. You have to close the provided InputStreams yourself.
*
* @param tempDir Directory to use for offline sorting
* @param tempFileNamePrefix prefix to use to generate temp file names
* @param affix InputStream for reading the hunspell affix file (won't be closed).
* @param dictionaries InputStream for reading the hunspell dictionary files (won't be closed).
* @throws IOException Can be thrown while reading from the InputStreams
* @throws ParseException Can be thrown if the content of the files does not meet expected formats
*/
public Dictionary(
Directory tempDir,
String tempFileNamePrefix,
InputStream affix,
List<InputStream> dictionaries,
boolean ignoreCase)
throws IOException, ParseException {
this.ignoreCase = ignoreCase;
this.needsInputCleaning = ignoreCase;
this.needsOutputCleaning = false; // set if we have an OCONV
flagLookup.add(new BytesRef()); // no flags -> ord 0
Path aff = Files.createTempFile(tempPath, "affix", "aff");
OutputStream out = new BufferedOutputStream(Files.newOutputStream(aff));
InputStream aff1 = null;
InputStream aff2 = null;
boolean success = false;
try {
// copy contents of affix stream to temp file
final byte[] buffer = new byte[1024 * 8];
int len;
while ((len = affix.read(buffer)) > 0) {
out.write(buffer, 0, len);
}
out.close();
// pass 1: get encoding
aff1 = new BufferedInputStream(Files.newInputStream(aff));
String encoding = getDictionaryEncoding(aff1);
// pass 2: parse affixes
CharsetDecoder decoder = getJavaEncoding(encoding);
aff2 = new BufferedInputStream(Files.newInputStream(aff));
readAffixFile(aff2, decoder);
// read dictionary entries
IntSequenceOutputs o = IntSequenceOutputs.getSingleton();
FSTCompiler<IntsRef> fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, o);
readDictionaryFiles(tempDir, tempFileNamePrefix, dictionaries, decoder, fstCompiler);
words = fstCompiler.compile();
aliases = null; // no longer needed
morphAliases = null; // no longer needed
success = true;
} finally {
IOUtils.closeWhileHandlingException(out, aff1, aff2);
if (success) {
Files.delete(aff);
} else {
IOUtils.deleteFilesIgnoringExceptions(aff);
}
}
}
/** Looks up Hunspell word forms from the dictionary */
IntsRef lookupWord(char word[], int offset, int length) {
return lookup(words, word, offset, length);
}
// only for testing
IntsRef lookupPrefix(char word[], int offset, int length) {
return lookup(prefixes, word, offset, length);
}
// only for testing
IntsRef lookupSuffix(char word[], int offset, int length) {
return lookup(suffixes, word, offset, length);
}
IntsRef lookup(FST<IntsRef> fst, char word[], int offset, int length) {
if (fst == null) {
return null;
}
final FST.BytesReader bytesReader = fst.getBytesReader();
final FST.Arc<IntsRef> arc = fst.getFirstArc(new FST.Arc<IntsRef>());
// Accumulate output as we go
final IntsRef NO_OUTPUT = fst.outputs.getNoOutput();
IntsRef output = NO_OUTPUT;
int l = offset + length;
try {
for (int i = offset, cp = 0; i < l; i += Character.charCount(cp)) {
cp = Character.codePointAt(word, i, l);
if (fst.findTargetArc(cp, arc, arc, bytesReader) == null) {
return null;
} else if (arc.output() != NO_OUTPUT) {
output = fst.outputs.add(output, arc.output());
}
}
if (fst.findTargetArc(FST.END_LABEL, arc, arc, bytesReader) == null) {
return null;
} else if (arc.output() != NO_OUTPUT) {
return fst.outputs.add(output, arc.output());
} else {
return output;
}
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
}
/**
* Reads the affix file through the provided InputStream, building up the prefix and suffix maps
*
* @param affixStream InputStream to read the content of the affix file from
* @param decoder CharsetDecoder to decode the content of the file
* @throws IOException Can be thrown while reading from the InputStream
*/
private void readAffixFile(InputStream affixStream, CharsetDecoder decoder)
throws IOException, ParseException {
TreeMap<String, List<Integer>> prefixes = new TreeMap<>();
TreeMap<String, List<Integer>> suffixes = new TreeMap<>();
Map<String, Integer> seenPatterns = new HashMap<>();
// zero condition -> 0 ord
seenPatterns.put(".*", 0);
patterns.add(null);
// zero strip -> 0 ord
Map<String, Integer> seenStrips = new LinkedHashMap<>();
seenStrips.put("", 0);
LineNumberReader reader = new LineNumberReader(new InputStreamReader(affixStream, decoder));
String line = null;
while ((line = reader.readLine()) != null) {
// ignore any BOM marker on first line
if (reader.getLineNumber() == 1 && line.startsWith("\uFEFF")) {
line = line.substring(1);
}
if (line.startsWith(ALIAS_KEY)) {
parseAlias(line);
} else if (line.startsWith(MORPH_ALIAS_KEY)) {
parseMorphAlias(line);
} else if (line.startsWith(PREFIX_KEY)) {
parseAffix(
prefixes, line, reader, PREFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
} else if (line.startsWith(SUFFIX_KEY)) {
parseAffix(
suffixes, line, reader, SUFFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
} else if (line.startsWith(FLAG_KEY)) {
// Assume that the FLAG line comes before any prefix or suffixes
// Store the strategy so it can be used when parsing the dic file
flagParsingStrategy = getFlagParsingStrategy(line);
} else if (line.equals(COMPLEXPREFIXES_KEY)) {
complexPrefixes =
true; // 2-stage prefix+1-stage suffix instead of 2-stage suffix+1-stage prefix
} else if (line.startsWith(CIRCUMFIX_KEY)) {
String parts[] = line.split("\\s+");
if (parts.length != 2) {
throw new ParseException("Illegal CIRCUMFIX declaration", reader.getLineNumber());
}
circumfix = flagParsingStrategy.parseFlag(parts[1]);
} else if (line.startsWith(KEEPCASE_KEY)) {
String parts[] = line.split("\\s+");
if (parts.length != 2) {
throw new ParseException("Illegal KEEPCASE declaration", reader.getLineNumber());
}
keepcase = flagParsingStrategy.parseFlag(parts[1]);
} else if (line.startsWith(NEEDAFFIX_KEY) || line.startsWith(PSEUDOROOT_KEY)) {
String parts[] = line.split("\\s+");
if (parts.length != 2) {
throw new ParseException("Illegal NEEDAFFIX declaration", reader.getLineNumber());
}
needaffix = flagParsingStrategy.parseFlag(parts[1]);
} else if (line.startsWith(ONLYINCOMPOUND_KEY)) {
String parts[] = line.split("\\s+");
if (parts.length != 2) {
throw new ParseException("Illegal ONLYINCOMPOUND declaration", reader.getLineNumber());
}
onlyincompound = flagParsingStrategy.parseFlag(parts[1]);
} else if (line.startsWith(IGNORE_KEY)) {
String parts[] = line.split("\\s+");
if (parts.length != 2) {
throw new ParseException("Illegal IGNORE declaration", reader.getLineNumber());
}
ignore = parts[1].toCharArray();
Arrays.sort(ignore);
needsInputCleaning = true;
} else if (line.startsWith(ICONV_KEY) || line.startsWith(OCONV_KEY)) {
String parts[] = line.split("\\s+");
String type = parts[0];
if (parts.length != 2) {
throw new ParseException("Illegal " + type + " declaration", reader.getLineNumber());
}
int num = Integer.parseInt(parts[1]);
FST<CharsRef> res = parseConversions(reader, num);
if (type.equals("ICONV")) {
iconv = res;
needsInputCleaning |= iconv != null;
} else {
oconv = res;
needsOutputCleaning |= oconv != null;
}
} else if (line.startsWith(FULLSTRIP_KEY)) {
fullStrip = true;
} else if (line.startsWith(LANG_KEY)) {
language = line.substring(LANG_KEY.length()).trim();
alternateCasing = "tr_TR".equals(language) || "az_AZ".equals(language);
}
}
this.prefixes = affixFST(prefixes);
this.suffixes = affixFST(suffixes);
int totalChars = 0;
for (String strip : seenStrips.keySet()) {
totalChars += strip.length();
}
stripData = new char[totalChars];
stripOffsets = new int[seenStrips.size() + 1];
int currentOffset = 0;
int currentIndex = 0;
for (String strip : seenStrips.keySet()) {
stripOffsets[currentIndex++] = currentOffset;
strip.getChars(0, strip.length(), stripData, currentOffset);
currentOffset += strip.length();
}
assert currentIndex == seenStrips.size();
stripOffsets[currentIndex] = currentOffset;
}
private FST<IntsRef> affixFST(TreeMap<String, List<Integer>> affixes) throws IOException {
IntSequenceOutputs outputs = IntSequenceOutputs.getSingleton();
FSTCompiler<IntsRef> fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, outputs);
IntsRefBuilder scratch = new IntsRefBuilder();
for (Map.Entry<String, List<Integer>> entry : affixes.entrySet()) {
Util.toUTF32(entry.getKey(), scratch);
List<Integer> entries = entry.getValue();
IntsRef output = new IntsRef(entries.size());
for (Integer c : entries) {
output.ints[output.length++] = c;
}
fstCompiler.add(scratch.get(), output);
}
return fstCompiler.compile();
}
static String escapeDash(String re) {
// we have to be careful, even though dash doesn't have a special meaning,
// some dictionaries already escape it (e.g. pt_PT), so we don't want to nullify it
StringBuilder escaped = new StringBuilder();
for (int i = 0; i < re.length(); i++) {
char c = re.charAt(i);
if (c == '-') {
escaped.append("\\-");
} else {
escaped.append(c);
if (c == '\\' && i + 1 < re.length()) {
escaped.append(re.charAt(i + 1));
i++;
}
}
}
return escaped.toString();
}
/**
* Parses a specific affix rule putting the result into the provided affix map
*
* @param affixes Map where the result of the parsing will be put
* @param header Header line of the affix rule
* @param reader BufferedReader to read the content of the rule from
* @param conditionPattern {@link String#format(String, Object...)} pattern to be used to generate
* the condition regex pattern
* @param seenPatterns map from condition -> index of patterns, for deduplication.
* @throws IOException Can be thrown while reading the rule
*/
private void parseAffix(
TreeMap<String, List<Integer>> affixes,
String header,
LineNumberReader reader,
String conditionPattern,
Map<String, Integer> seenPatterns,
Map<String, Integer> seenStrips)
throws IOException, ParseException {
BytesRefBuilder scratch = new BytesRefBuilder();
StringBuilder sb = new StringBuilder();
String args[] = header.split("\\s+");
boolean crossProduct = args[2].equals("Y");
boolean isSuffix = conditionPattern == SUFFIX_CONDITION_REGEX_PATTERN;
int numLines = Integer.parseInt(args[3]);
affixData = ArrayUtil.grow(affixData, (currentAffix << 3) + (numLines << 3));
ByteArrayDataOutput affixWriter =
new ByteArrayDataOutput(affixData, currentAffix << 3, numLines << 3);
for (int i = 0; i < numLines; i++) {
assert affixWriter.getPosition() == currentAffix << 3;
String line = reader.readLine();
String ruleArgs[] = line.split("\\s+");
// from the manpage: PFX flag stripping prefix [condition [morphological_fields...]]
// condition is optional
if (ruleArgs.length < 4) {
throw new ParseException(
"The affix file contains a rule with less than four elements: " + line,
reader.getLineNumber());
}
char flag = flagParsingStrategy.parseFlag(ruleArgs[1]);
String strip = ruleArgs[2].equals("0") ? "" : ruleArgs[2];
String affixArg = ruleArgs[3];
char appendFlags[] = null;
// first: parse continuation classes out of affix
int flagSep = affixArg.lastIndexOf('/');
if (flagSep != -1) {
String flagPart = affixArg.substring(flagSep + 1);
affixArg = affixArg.substring(0, flagSep);
if (aliasCount > 0) {
flagPart = getAliasValue(Integer.parseInt(flagPart));
}
appendFlags = flagParsingStrategy.parseFlags(flagPart);
Arrays.sort(appendFlags);
twoStageAffix = true;
}
// zero affix -> empty string
if ("0".equals(affixArg)) {
affixArg = "";
}
String condition = ruleArgs.length > 4 ? ruleArgs[4] : ".";
// at least the gascon affix file has this issue
if (condition.startsWith("[") && condition.indexOf(']') == -1) {
condition = condition + "]";
}
// "dash hasn't got special meaning" (we must escape it)
if (condition.indexOf('-') >= 0) {
condition = escapeDash(condition);
}
final String regex;
if (".".equals(condition)) {
regex = ".*"; // Zero condition is indicated by dot
} else if (condition.equals(strip)) {
regex = ".*"; // TODO: optimize this better:
// if we remove 'strip' from condition, we don't have to append 'strip' to check it...!
// but this is complicated...
} else {
regex = String.format(Locale.ROOT, conditionPattern, condition);
}
// deduplicate patterns
Integer patternIndex = seenPatterns.get(regex);
if (patternIndex == null) {
patternIndex = patterns.size();
if (patternIndex > Short.MAX_VALUE) {
throw new UnsupportedOperationException(
"Too many patterns, please report this to [email protected]");
}
seenPatterns.put(regex, patternIndex);
CharacterRunAutomaton pattern =
new CharacterRunAutomaton(new RegExp(regex, RegExp.NONE).toAutomaton());
patterns.add(pattern);
}
Integer stripOrd = seenStrips.get(strip);
if (stripOrd == null) {
stripOrd = seenStrips.size();
seenStrips.put(strip, stripOrd);
if (stripOrd > Character.MAX_VALUE) {
throw new UnsupportedOperationException(
"Too many unique strips, please report this to [email protected]");
}
}
if (appendFlags == null) {
appendFlags = NOFLAGS;
}
encodeFlags(scratch, appendFlags);
int appendFlagsOrd = flagLookup.add(scratch.get());
if (appendFlagsOrd < 0) {
// already exists in our hash
appendFlagsOrd = (-appendFlagsOrd) - 1;
} else if (appendFlagsOrd > Short.MAX_VALUE) {
// this limit is probably flexible, but it's a good sanity check too
throw new UnsupportedOperationException(
"Too many unique append flags, please report this to [email protected]");
}
affixWriter.writeShort((short) flag);
affixWriter.writeShort((short) stripOrd.intValue());
// encode crossProduct into patternIndex
int patternOrd = patternIndex.intValue() << 1 | (crossProduct ? 1 : 0);
affixWriter.writeShort((short) patternOrd);
affixWriter.writeShort((short) appendFlagsOrd);
if (needsInputCleaning) {
CharSequence cleaned = cleanInput(affixArg, sb);
affixArg = cleaned.toString();
}
if (isSuffix) {
affixArg = new StringBuilder(affixArg).reverse().toString();
}
List<Integer> list = affixes.get(affixArg);
if (list == null) {
list = new ArrayList<>();
affixes.put(affixArg, list);
}
list.add(currentAffix);
currentAffix++;
}
}
private FST<CharsRef> parseConversions(LineNumberReader reader, int num)
throws IOException, ParseException {
Map<String, String> mappings = new TreeMap<>();
for (int i = 0; i < num; i++) {
String line = reader.readLine();
String parts[] = line.split("\\s+");
if (parts.length != 3) {
throw new ParseException("invalid syntax: " + line, reader.getLineNumber());
}
if (mappings.put(parts[1], parts[2]) != null) {
throw new IllegalStateException("duplicate mapping specified for: " + parts[1]);
}
}
Outputs<CharsRef> outputs = CharSequenceOutputs.getSingleton();
FSTCompiler<CharsRef> fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE2, outputs);
IntsRefBuilder scratchInts = new IntsRefBuilder();
for (Map.Entry<String, String> entry : mappings.entrySet()) {
Util.toUTF16(entry.getKey(), scratchInts);
fstCompiler.add(scratchInts.get(), new CharsRef(entry.getValue()));
}
return fstCompiler.compile();
}
/** pattern accepts optional BOM + SET + any whitespace */
static final Pattern ENCODING_PATTERN = Pattern.compile("^(\u00EF\u00BB\u00BF)?SET\\s+");
/**
* Parses the encoding specified in the affix file readable through the provided InputStream
*
* @param affix InputStream for reading the affix file
* @return Encoding specified in the affix file
* @throws IOException Can be thrown while reading from the InputStream
* @throws ParseException Thrown if the first non-empty non-comment line read from the file does
* not adhere to the format {@code SET <encoding>}
*/
static String getDictionaryEncoding(InputStream affix) throws IOException, ParseException {
final StringBuilder encoding = new StringBuilder();
for (; ; ) {
encoding.setLength(0);
int ch;
while ((ch = affix.read()) >= 0) {
if (ch == '\n') {
break;
}
if (ch != '\r') {
encoding.append((char) ch);
}
}
if (encoding.length() == 0
|| encoding.charAt(0) == '#'
||
// this test only at the end as ineffective but would allow lines only containing spaces:
encoding.toString().trim().length() == 0) {
if (ch < 0) {
throw new ParseException("Unexpected end of affix file.", 0);
}
continue;
}
Matcher matcher = ENCODING_PATTERN.matcher(encoding);
if (matcher.find()) {
int last = matcher.end();
return encoding.substring(last).trim();
}
}
}
static final Map<String, String> CHARSET_ALIASES =
Map.of("microsoft-cp1251", "windows-1251", "TIS620-2533", "TIS-620");
/**
* Retrieves the CharsetDecoder for the given encoding. Note, This isn't perfect as I think
* ISCII-DEVANAGARI and MICROSOFT-CP1251 etc are allowed...
*
* @param encoding Encoding to retrieve the CharsetDecoder for
* @return CharSetDecoder for the given encoding
*/
private CharsetDecoder getJavaEncoding(String encoding) {
if ("ISO8859-14".equals(encoding)) {
return new ISO8859_14Decoder();
}
String canon = CHARSET_ALIASES.get(encoding);
if (canon != null) {
encoding = canon;
}
Charset charset = Charset.forName(encoding);
return charset.newDecoder().onMalformedInput(CodingErrorAction.REPLACE);
}
/**
* Determines the appropriate {@link FlagParsingStrategy} based on the FLAG definition line taken
* from the affix file
*
* @param flagLine Line containing the flag information
* @return FlagParsingStrategy that handles parsing flags in the way specified in the FLAG
* definition
*/
static FlagParsingStrategy getFlagParsingStrategy(String flagLine) {
String parts[] = flagLine.split("\\s+");
if (parts.length != 2) {
throw new IllegalArgumentException("Illegal FLAG specification: " + flagLine);
}
String flagType = parts[1];
if (NUM_FLAG_TYPE.equals(flagType)) {
return new NumFlagParsingStrategy();
} else if (UTF8_FLAG_TYPE.equals(flagType)) {
return new SimpleFlagParsingStrategy();
} else if (LONG_FLAG_TYPE.equals(flagType)) {
return new DoubleASCIIFlagParsingStrategy();
}
throw new IllegalArgumentException("Unknown flag type: " + flagType);
}
final char FLAG_SEPARATOR = 0x1f; // flag separator after escaping
final char MORPH_SEPARATOR =
0x1e; // separator for boundary of entry (may be followed by morph data)
String unescapeEntry(String entry) {
StringBuilder sb = new StringBuilder();
int end = morphBoundary(entry);
for (int i = 0; i < end; i++) {
char ch = entry.charAt(i);
if (ch == '\\' && i + 1 < entry.length()) {
sb.append(entry.charAt(i + 1));
i++;
} else if (ch == '/') {
sb.append(FLAG_SEPARATOR);
} else if (ch == MORPH_SEPARATOR || ch == FLAG_SEPARATOR) {
// BINARY EXECUTABLES EMBEDDED IN ZULU DICTIONARIES!!!!!!!
} else {
sb.append(ch);
}
}
sb.append(MORPH_SEPARATOR);
if (end < entry.length()) {
for (int i = end; i < entry.length(); i++) {
char c = entry.charAt(i);
if (c == FLAG_SEPARATOR || c == MORPH_SEPARATOR) {
// BINARY EXECUTABLES EMBEDDED IN ZULU DICTIONARIES!!!!!!!
} else {
sb.append(c);
}
}
}
return sb.toString();
}
static int morphBoundary(String line) {
int end = indexOfSpaceOrTab(line, 0);
if (end == -1) {
return line.length();
}
while (end >= 0 && end < line.length()) {
if (line.charAt(end) == '\t'
|| end + 3 < line.length()
&& Character.isLetter(line.charAt(end + 1))
&& Character.isLetter(line.charAt(end + 2))
&& line.charAt(end + 3) == ':') {
break;
}
end = indexOfSpaceOrTab(line, end + 1);
}
if (end == -1) {
return line.length();
}
return end;
}
static int indexOfSpaceOrTab(String text, int start) {
int pos1 = text.indexOf('\t', start);
int pos2 = text.indexOf(' ', start);
if (pos1 >= 0 && pos2 >= 0) {
return Math.min(pos1, pos2);
} else {
return Math.max(pos1, pos2);
}
}
/**
* Reads the dictionary file through the provided InputStreams, building up the words map
*
* @param dictionaries InputStreams to read the dictionary file through
* @param decoder CharsetDecoder used to decode the contents of the file
* @throws IOException Can be thrown while reading from the file
*/
private void readDictionaryFiles(
Directory tempDir,
String tempFileNamePrefix,
List<InputStream> dictionaries,
CharsetDecoder decoder,
FSTCompiler<IntsRef> words)
throws IOException {
BytesRefBuilder flagsScratch = new BytesRefBuilder();
IntsRefBuilder scratchInts = new IntsRefBuilder();
StringBuilder sb = new StringBuilder();
IndexOutput unsorted = tempDir.createTempOutput(tempFileNamePrefix, "dat", IOContext.DEFAULT);
try (ByteSequencesWriter writer = new ByteSequencesWriter(unsorted)) {
for (InputStream dictionary : dictionaries) {
BufferedReader lines = new BufferedReader(new InputStreamReader(dictionary, decoder));
String line =
lines.readLine(); // first line is number of entries (approximately, sometimes)
while ((line = lines.readLine()) != null) {
// wild and unpredictable code comment rules
if (line.isEmpty()
|| line.charAt(0) == '/'
|| line.charAt(0) == '#'
|| line.charAt(0) == '\t') {
continue;
}
line = unescapeEntry(line);
// if we havent seen any stem exceptions, try to parse one
if (hasStemExceptions == false) {
int morphStart = line.indexOf(MORPH_SEPARATOR);
if (morphStart >= 0 && morphStart < line.length()) {
hasStemExceptions = parseStemException(line.substring(morphStart + 1)) != null;
}
}
if (needsInputCleaning) {
int flagSep = line.indexOf(FLAG_SEPARATOR);
if (flagSep == -1) {
flagSep = line.indexOf(MORPH_SEPARATOR);
}
if (flagSep == -1) {
CharSequence cleansed = cleanInput(line, sb);
writer.write(cleansed.toString().getBytes(StandardCharsets.UTF_8));
} else {
String text = line.substring(0, flagSep);
CharSequence cleansed = cleanInput(text, sb);
if (cleansed != sb) {
sb.setLength(0);
sb.append(cleansed);
}
sb.append(line.substring(flagSep));
writer.write(sb.toString().getBytes(StandardCharsets.UTF_8));
}
} else {
writer.write(line.getBytes(StandardCharsets.UTF_8));
}
}
}
CodecUtil.writeFooter(unsorted);
}
OfflineSorter sorter =
new OfflineSorter(
tempDir,
tempFileNamePrefix,
new Comparator<BytesRef>() {
BytesRef scratch1 = new BytesRef();
BytesRef scratch2 = new BytesRef();
@Override
public int compare(BytesRef o1, BytesRef o2) {
scratch1.bytes = o1.bytes;
scratch1.offset = o1.offset;
scratch1.length = o1.length;
for (int i = scratch1.length - 1; i >= 0; i--) {
if (scratch1.bytes[scratch1.offset + i] == FLAG_SEPARATOR
|| scratch1.bytes[scratch1.offset + i] == MORPH_SEPARATOR) {
scratch1.length = i;
break;
}
}
scratch2.bytes = o2.bytes;
scratch2.offset = o2.offset;
scratch2.length = o2.length;
for (int i = scratch2.length - 1; i >= 0; i--) {
if (scratch2.bytes[scratch2.offset + i] == FLAG_SEPARATOR
|| scratch2.bytes[scratch2.offset + i] == MORPH_SEPARATOR) {
scratch2.length = i;
break;
}
}
int cmp = scratch1.compareTo(scratch2);
if (cmp == 0) {
// tie break on whole row
return o1.compareTo(o2);
} else {
return cmp;
}
}
});
String sorted;
boolean success = false;
try {
sorted = sorter.sort(unsorted.getName());
success = true;
} finally {
if (success) {
tempDir.deleteFile(unsorted.getName());
} else {
IOUtils.deleteFilesIgnoringExceptions(tempDir, unsorted.getName());
}
}
boolean success2 = false;
try (ByteSequencesReader reader =
new ByteSequencesReader(tempDir.openChecksumInput(sorted, IOContext.READONCE), sorted)) {
// TODO: the flags themselves can be double-chars (long) or also numeric
// either way the trick is to encode them as char... but they must be parsed differently
String currentEntry = null;
IntsRefBuilder currentOrds = new IntsRefBuilder();
while (true) {
BytesRef scratch = reader.next();
if (scratch == null) {
break;
}
String line = scratch.utf8ToString();
String entry;
char wordForm[];
int end;
int flagSep = line.indexOf(FLAG_SEPARATOR);
if (flagSep == -1) {
wordForm = NOFLAGS;
end = line.indexOf(MORPH_SEPARATOR);
entry = line.substring(0, end);
} else {
end = line.indexOf(MORPH_SEPARATOR);
String flagPart = line.substring(flagSep + 1, end);
if (aliasCount > 0) {
flagPart = getAliasValue(Integer.parseInt(flagPart));
}
wordForm = flagParsingStrategy.parseFlags(flagPart);
Arrays.sort(wordForm);
entry = line.substring(0, flagSep);
}
// we possibly have morphological data
int stemExceptionID = 0;
if (hasStemExceptions && end + 1 < line.length()) {
String stemException = parseStemException(line.substring(end + 1));
if (stemException != null) {
stemExceptions = ArrayUtil.grow(stemExceptions, stemExceptionCount + 1);
stemExceptionID =
stemExceptionCount + 1; // we use '0' to indicate no exception for the form
stemExceptions[stemExceptionCount++] = stemException;
}
}
int cmp = currentEntry == null ? 1 : entry.compareTo(currentEntry);
if (cmp < 0) {
throw new IllegalArgumentException("out of order: " + entry + " < " + currentEntry);
} else {
encodeFlags(flagsScratch, wordForm);
int ord = flagLookup.add(flagsScratch.get());
if (ord < 0) {
// already exists in our hash
ord = (-ord) - 1;
}
// finalize current entry, and switch "current" if necessary
if (cmp > 0 && currentEntry != null) {
Util.toUTF32(currentEntry, scratchInts);
words.add(scratchInts.get(), currentOrds.get());
}
// swap current
if (cmp > 0 || currentEntry == null) {
currentEntry = entry;
currentOrds = new IntsRefBuilder(); // must be this way
}
if (hasStemExceptions) {
currentOrds.append(ord);
currentOrds.append(stemExceptionID);
} else {
currentOrds.append(ord);
}
}
}
// finalize last entry
Util.toUTF32(currentEntry, scratchInts);
words.add(scratchInts.get(), currentOrds.get());
success2 = true;
} finally {
if (success2) {
tempDir.deleteFile(sorted);
} else {
IOUtils.deleteFilesIgnoringExceptions(tempDir, sorted);
}
}
}
static char[] decodeFlags(BytesRef b) {
if (b.length == 0) {
return CharsRef.EMPTY_CHARS;
}
int len = b.length >>> 1;
char flags[] = new char[len];
int upto = 0;
int end = b.offset + b.length;
for (int i = b.offset; i < end; i += 2) {
flags[upto++] = (char) ((b.bytes[i] << 8) | (b.bytes[i + 1] & 0xff));
}
return flags;
}
static void encodeFlags(BytesRefBuilder b, char flags[]) {
int len = flags.length << 1;
b.grow(len);
b.clear();
for (int i = 0; i < flags.length; i++) {
int flag = flags[i];
b.append((byte) ((flag >> 8) & 0xff));
b.append((byte) (flag & 0xff));
}
}
private void parseAlias(String line) {
String ruleArgs[] = line.split("\\s+");
if (aliases == null) {
// first line should be the aliases count
final int count = Integer.parseInt(ruleArgs[1]);
aliases = new String[count];
} else {
// an alias can map to no flags
String aliasValue = ruleArgs.length == 1 ? "" : ruleArgs[1];
aliases[aliasCount++] = aliasValue;
}
}
private String getAliasValue(int id) {
try {
return aliases[id - 1];
} catch (IndexOutOfBoundsException ex) {
throw new IllegalArgumentException("Bad flag alias number:" + id, ex);
}
}
String getStemException(int id) {
return stemExceptions[id - 1];
}
private void parseMorphAlias(String line) {
if (morphAliases == null) {
// first line should be the aliases count
final int count = Integer.parseInt(line.substring(3));
morphAliases = new String[count];
} else {
String arg = line.substring(2); // leave the space
morphAliases[morphAliasCount++] = arg;
}
}
private String parseStemException(String morphData) {
// first see if it's an alias
if (morphAliasCount > 0) {
try {
int alias = Integer.parseInt(morphData.trim());
morphData = morphAliases[alias - 1];
} catch (NumberFormatException e) {
// fine
}
}
// try to parse morph entry
int index = morphData.indexOf(" st:");
if (index < 0) {
index = morphData.indexOf("\tst:");
}
if (index >= 0) {
int endIndex = indexOfSpaceOrTab(morphData, index + 1);
if (endIndex < 0) {
endIndex = morphData.length();
}
return morphData.substring(index + 4, endIndex);
}
return null;
}
/** Abstraction of the process of parsing flags taken from the affix and dic files */
abstract static class FlagParsingStrategy {
/**
* Parses the given String into a single flag
*
* @param rawFlag String to parse into a flag
* @return Parsed flag
*/
char parseFlag(String rawFlag) {
char flags[] = parseFlags(rawFlag);
if (flags.length != 1) {
throw new IllegalArgumentException("expected only one flag, got: " + rawFlag);
}
return flags[0];
}
/**
* Parses the given String into multiple flags
*
* @param rawFlags String to parse into flags
* @return Parsed flags
*/
abstract char[] parseFlags(String rawFlags);
}
/**
* Simple implementation of {@link FlagParsingStrategy} that treats the chars in each String as a
* individual flags. Can be used with both the ASCII and UTF-8 flag types.
*/
private static class SimpleFlagParsingStrategy extends FlagParsingStrategy {
@Override
public char[] parseFlags(String rawFlags) {
return rawFlags.toCharArray();
}
}
/**
* Implementation of {@link FlagParsingStrategy} that assumes each flag is encoded in its
* numerical form. In the case of multiple flags, each number is separated by a comma.
*/
private static class NumFlagParsingStrategy extends FlagParsingStrategy {
@Override
public char[] parseFlags(String rawFlags) {
String[] rawFlagParts = rawFlags.trim().split(",");
char[] flags = new char[rawFlagParts.length];
int upto = 0;
for (int i = 0; i < rawFlagParts.length; i++) {
// note, removing the trailing X/leading I for nepali... what is the rule here?!
String replacement = rawFlagParts[i].replaceAll("[^0-9]", "");
// note, ignoring empty flags (this happens in danish, for example)
if (replacement.isEmpty()) {
continue;
}
flags[upto++] = (char) Integer.parseInt(replacement);
}
if (upto < flags.length) {
flags = ArrayUtil.copyOfSubArray(flags, 0, upto);
}
return flags;
}
}
/**
* Implementation of {@link FlagParsingStrategy} that assumes each flag is encoded as two ASCII
* characters whose codes must be combined into a single character.
*/
private static class DoubleASCIIFlagParsingStrategy extends FlagParsingStrategy {
@Override
public char[] parseFlags(String rawFlags) {
if (rawFlags.length() == 0) {
return new char[0];
}
StringBuilder builder = new StringBuilder();
if (rawFlags.length() % 2 == 1) {
throw new IllegalArgumentException(
"Invalid flags (should be even number of characters): " + rawFlags);
}
for (int i = 0; i < rawFlags.length(); i += 2) {
char f1 = rawFlags.charAt(i);
char f2 = rawFlags.charAt(i + 1);
if (f1 >= 256 || f2 >= 256) {
throw new IllegalArgumentException(
"Invalid flags (LONG flags must be double ASCII): " + rawFlags);
}
char combined = (char) (f1 << 8 | f2);
builder.append(combined);
}
char flags[] = new char[builder.length()];
builder.getChars(0, builder.length(), flags, 0);
return flags;
}
}
static boolean hasFlag(char flags[], char flag) {
return Arrays.binarySearch(flags, flag) >= 0;
}
CharSequence cleanInput(CharSequence input, StringBuilder reuse) {
reuse.setLength(0);
for (int i = 0; i < input.length(); i++) {
char ch = input.charAt(i);
if (ignore != null && Arrays.binarySearch(ignore, ch) >= 0) {
continue;
}
if (ignoreCase && iconv == null) {
// if we have no input conversion mappings, do this on-the-fly
ch = caseFold(ch);
}
reuse.append(ch);
}
if (iconv != null) {
try {
applyMappings(iconv, reuse);
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
if (ignoreCase) {
for (int i = 0; i < reuse.length(); i++) {
reuse.setCharAt(i, caseFold(reuse.charAt(i)));
}
}
}
return reuse;
}
/** folds single character (according to LANG if present) */
char caseFold(char c) {
if (alternateCasing) {
if (c == 'I') {
return 'ı';
} else if (c == 'İ') {
return 'i';
} else {
return Character.toLowerCase(c);
}
} else {
return Character.toLowerCase(c);
}
}
// TODO: this could be more efficient!
static void applyMappings(FST<CharsRef> fst, StringBuilder sb) throws IOException {
final FST.BytesReader bytesReader = fst.getBytesReader();
final FST.Arc<CharsRef> firstArc = fst.getFirstArc(new FST.Arc<CharsRef>());
final CharsRef NO_OUTPUT = fst.outputs.getNoOutput();
// temporary stuff
final FST.Arc<CharsRef> arc = new FST.Arc<>();
int longestMatch;
CharsRef longestOutput;
for (int i = 0; i < sb.length(); i++) {
arc.copyFrom(firstArc);
CharsRef output = NO_OUTPUT;
longestMatch = -1;
longestOutput = null;
for (int j = i; j < sb.length(); j++) {
char ch = sb.charAt(j);
if (fst.findTargetArc(ch, arc, arc, bytesReader) == null) {
break;
} else {
output = fst.outputs.add(output, arc.output());
}
if (arc.isFinal()) {
longestOutput = fst.outputs.add(output, arc.nextFinalOutput());
longestMatch = j;
}
}
if (longestMatch >= 0) {
sb.delete(i, longestMatch + 1);
sb.insert(i, longestOutput);
i += (longestOutput.length - 1);
}
}
}
/** Returns true if this dictionary was constructed with the {@code ignoreCase} option */
public boolean getIgnoreCase() {
return ignoreCase;
}
private static Path DEFAULT_TEMP_DIR;
/** Used by test framework */
public static void setDefaultTempDir(Path tempDir) {
DEFAULT_TEMP_DIR = tempDir;
}
/**
* Returns the default temporary directory. By default, java.io.tmpdir. If not accessible or not
* available, an IOException is thrown
*/
static synchronized Path getDefaultTempDir() throws IOException {
if (DEFAULT_TEMP_DIR == null) {
// Lazy init
String tempDirPath = System.getProperty("java.io.tmpdir");
if (tempDirPath == null) {
throw new IOException("Java has no temporary folder property (java.io.tmpdir)?");
}
Path tempDirectory = Paths.get(tempDirPath);
if (Files.isWritable(tempDirectory) == false) {
throw new IOException(
"Java's temporary folder not present or writeable?: " + tempDirectory.toAbsolutePath());
}
DEFAULT_TEMP_DIR = tempDirectory;
}
return DEFAULT_TEMP_DIR;
}
}
| 1 | 39,406 | Did you run gradlew tidy? Wildcard imports shouldn't be there, hence the question. | apache-lucene-solr | java |